Diffstat (limited to 'src')
-rw-r--r--src/android/camera3_hal.cpp2
-rw-r--r--src/android/camera_buffer.h2
-rw-r--r--src/android/camera_capabilities.cpp66
-rw-r--r--src/android/camera_capabilities.h2
-rw-r--r--src/android/camera_device.cpp70
-rw-r--r--src/android/camera_device.h5
-rw-r--r--src/android/camera_hal_config.cpp11
-rw-r--r--src/android/camera_hal_config.h2
-rw-r--r--src/android/camera_hal_manager.cpp11
-rw-r--r--src/android/camera_hal_manager.h2
-rw-r--r--src/android/camera_metadata.cpp2
-rw-r--r--src/android/camera_metadata.h2
-rw-r--r--src/android/camera_ops.cpp2
-rw-r--r--src/android/camera_ops.h2
-rw-r--r--src/android/camera_request.cpp2
-rw-r--r--src/android/camera_request.h5
-rw-r--r--src/android/camera_stream.cpp2
-rw-r--r--src/android/camera_stream.h2
-rw-r--r--src/android/cros/camera3_hal.cpp6
-rw-r--r--src/android/cros_mojo_token.h12
-rw-r--r--src/android/data/nautilus/camera_hal.yaml2
-rw-r--r--src/android/data/soraka/camera_hal.yaml2
-rw-r--r--src/android/frame_buffer_allocator.h9
-rw-r--r--src/android/hal_framebuffer.cpp22
-rw-r--r--src/android/hal_framebuffer.h26
-rw-r--r--src/android/jpeg/encoder.h7
-rw-r--r--src/android/jpeg/encoder_jea.cpp56
-rw-r--r--src/android/jpeg/encoder_jea.h31
-rw-r--r--src/android/jpeg/encoder_libjpeg.cpp15
-rw-r--r--src/android/jpeg/encoder_libjpeg.h5
-rw-r--r--src/android/jpeg/exif.cpp15
-rw-r--r--src/android/jpeg/exif.h7
-rw-r--r--src/android/jpeg/meson.build14
-rw-r--r--src/android/jpeg/post_processor_jpeg.cpp15
-rw-r--r--src/android/jpeg/post_processor_jpeg.h2
-rw-r--r--src/android/jpeg/thumbnailer.cpp2
-rw-r--r--src/android/jpeg/thumbnailer.h2
-rw-r--r--src/android/meson.build6
-rw-r--r--src/android/mm/cros_camera_buffer.cpp2
-rw-r--r--src/android/mm/cros_frame_buffer_allocator.cpp18
-rw-r--r--src/android/mm/generic_camera_buffer.cpp2
-rw-r--r--src/android/mm/generic_frame_buffer_allocator.cpp27
-rw-r--r--src/android/mm/libhardware_stub.c17
-rw-r--r--src/android/mm/meson.build8
-rw-r--r--src/android/post_processor.h2
-rw-r--r--src/android/yuv/post_processor_yuv.cpp2
-rw-r--r--src/android/yuv/post_processor_yuv.h2
-rw-r--r--src/apps/cam/camera_session.cpp (renamed from src/cam/camera_session.cpp)33
-rw-r--r--src/apps/cam/camera_session.h (renamed from src/cam/camera_session.h)4
-rw-r--r--src/apps/cam/capture-script.yaml (renamed from src/cam/capture-script.yaml)29
-rw-r--r--src/apps/cam/capture_script.cpp662
-rw-r--r--src/apps/cam/capture_script.h (renamed from src/cam/capture_script.h)17
-rw-r--r--src/apps/cam/drm.cpp (renamed from src/cam/drm.cpp)41
-rw-r--r--src/apps/cam/drm.h (renamed from src/cam/drm.h)3
-rw-r--r--src/apps/cam/file_sink.cpp (renamed from src/cam/file_sink.cpp)65
-rw-r--r--src/apps/cam/file_sink.h (renamed from src/cam/file_sink.h)11
-rw-r--r--src/apps/cam/frame_sink.cpp (renamed from src/cam/frame_sink.cpp)2
-rw-r--r--src/apps/cam/frame_sink.h (renamed from src/cam/frame_sink.h)2
-rw-r--r--src/apps/cam/kms_sink.cpp (renamed from src/cam/kms_sink.cpp)212
-rw-r--r--src/apps/cam/kms_sink.h (renamed from src/cam/kms_sink.h)18
-rw-r--r--src/apps/cam/main.cpp (renamed from src/cam/main.cpp)31
-rw-r--r--src/apps/cam/main.h (renamed from src/cam/main.h)3
-rw-r--r--src/apps/cam/meson.build (renamed from src/cam/meson.build)25
-rw-r--r--src/apps/cam/sdl_sink.cpp (renamed from src/cam/sdl_sink.cpp)43
-rw-r--r--src/apps/cam/sdl_sink.h (renamed from src/cam/sdl_sink.h)2
-rw-r--r--src/apps/cam/sdl_texture.cpp (renamed from src/cam/sdl_texture.cpp)8
-rw-r--r--src/apps/cam/sdl_texture.h (renamed from src/cam/sdl_texture.h)15
-rw-r--r--src/apps/cam/sdl_texture_mjpg.cpp83
-rw-r--r--src/apps/cam/sdl_texture_mjpg.h (renamed from src/cam/sdl_texture_mjpg.h)10
-rw-r--r--src/apps/cam/sdl_texture_yuv.cpp33
-rw-r--r--src/apps/cam/sdl_texture_yuv.h26
-rw-r--r--src/apps/common/dng_writer.cpp (renamed from src/qcam/dng_writer.cpp)85
-rw-r--r--src/apps/common/dng_writer.h (renamed from src/qcam/dng_writer.h)4
-rw-r--r--src/apps/common/event_loop.cpp (renamed from src/cam/event_loop.cpp)2
-rw-r--r--src/apps/common/event_loop.h (renamed from src/cam/event_loop.h)2
-rw-r--r--src/apps/common/image.cpp (renamed from src/cam/image.cpp)2
-rw-r--r--src/apps/common/image.h (renamed from src/cam/image.h)2
-rw-r--r--src/apps/common/meson.build27
-rw-r--r--src/apps/common/options.cpp (renamed from src/cam/options.cpp)2
-rw-r--r--src/apps/common/options.h (renamed from src/cam/options.h)2
-rw-r--r--src/apps/common/ppm_writer.cpp53
-rw-r--r--src/apps/common/ppm_writer.h20
-rw-r--r--src/apps/common/stream_options.cpp (renamed from src/cam/stream_options.cpp)56
-rw-r--r--src/apps/common/stream_options.h (renamed from src/cam/stream_options.h)11
-rw-r--r--src/apps/ipa-verify/main.cpp64
-rw-r--r--src/apps/ipa-verify/meson.build15
-rw-r--r--src/apps/lc-compliance/environment.cpp (renamed from src/lc-compliance/environment.cpp)2
-rw-r--r--src/apps/lc-compliance/environment.h (renamed from src/lc-compliance/environment.h)2
-rw-r--r--src/apps/lc-compliance/helpers/capture.cpp (renamed from src/lc-compliance/simple_capture.cpp)57
-rw-r--r--src/apps/lc-compliance/helpers/capture.h (renamed from src/lc-compliance/simple_capture.h)19
-rw-r--r--src/apps/lc-compliance/main.cpp (renamed from src/lc-compliance/main.cpp)5
-rw-r--r--src/apps/lc-compliance/meson.build (renamed from src/lc-compliance/meson.build)22
-rw-r--r--src/apps/lc-compliance/tests/capture_test.cpp (renamed from src/lc-compliance/capture_test.cpp)28
-rw-r--r--src/apps/meson.build22
-rw-r--r--src/apps/qcam/assets/feathericons/activity.svg (renamed from src/qcam/assets/feathericons/activity.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/airplay.svg (renamed from src/qcam/assets/feathericons/airplay.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/alert-circle.svg (renamed from src/qcam/assets/feathericons/alert-circle.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/alert-octagon.svg (renamed from src/qcam/assets/feathericons/alert-octagon.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/alert-triangle.svg (renamed from src/qcam/assets/feathericons/alert-triangle.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/align-center.svg (renamed from src/qcam/assets/feathericons/align-center.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/align-justify.svg (renamed from src/qcam/assets/feathericons/align-justify.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/align-left.svg (renamed from src/qcam/assets/feathericons/align-left.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/align-right.svg (renamed from src/qcam/assets/feathericons/align-right.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/anchor.svg (renamed from src/qcam/assets/feathericons/anchor.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/aperture.svg (renamed from src/qcam/assets/feathericons/aperture.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/archive.svg (renamed from src/qcam/assets/feathericons/archive.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/arrow-down-circle.svg (renamed from src/qcam/assets/feathericons/arrow-down-circle.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/arrow-down-left.svg (renamed from src/qcam/assets/feathericons/arrow-down-left.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/arrow-down-right.svg (renamed from src/qcam/assets/feathericons/arrow-down-right.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/arrow-down.svg (renamed from src/qcam/assets/feathericons/arrow-down.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/arrow-left-circle.svg (renamed from src/qcam/assets/feathericons/arrow-left-circle.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/arrow-left.svg (renamed from src/qcam/assets/feathericons/arrow-left.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/arrow-right-circle.svg (renamed from src/qcam/assets/feathericons/arrow-right-circle.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/arrow-right.svg (renamed from src/qcam/assets/feathericons/arrow-right.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/arrow-up-circle.svg (renamed from src/qcam/assets/feathericons/arrow-up-circle.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/arrow-up-left.svg (renamed from src/qcam/assets/feathericons/arrow-up-left.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/arrow-up-right.svg (renamed from src/qcam/assets/feathericons/arrow-up-right.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/arrow-up.svg (renamed from src/qcam/assets/feathericons/arrow-up.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/at-sign.svg (renamed from src/qcam/assets/feathericons/at-sign.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/award.svg (renamed from src/qcam/assets/feathericons/award.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/bar-chart-2.svg (renamed from src/qcam/assets/feathericons/bar-chart-2.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/bar-chart.svg (renamed from src/qcam/assets/feathericons/bar-chart.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/battery-charging.svg (renamed from src/qcam/assets/feathericons/battery-charging.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/battery.svg (renamed from src/qcam/assets/feathericons/battery.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/bell-off.svg (renamed from src/qcam/assets/feathericons/bell-off.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/bell.svg (renamed from src/qcam/assets/feathericons/bell.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/bluetooth.svg (renamed from src/qcam/assets/feathericons/bluetooth.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/bold.svg (renamed from src/qcam/assets/feathericons/bold.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/book-open.svg (renamed from src/qcam/assets/feathericons/book-open.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/book.svg (renamed from src/qcam/assets/feathericons/book.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/bookmark.svg (renamed from src/qcam/assets/feathericons/bookmark.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/box.svg (renamed from src/qcam/assets/feathericons/box.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/briefcase.svg (renamed from src/qcam/assets/feathericons/briefcase.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/calendar.svg (renamed from src/qcam/assets/feathericons/calendar.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/camera-off.svg (renamed from src/qcam/assets/feathericons/camera-off.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/camera.svg (renamed from src/qcam/assets/feathericons/camera.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/cast.svg (renamed from src/qcam/assets/feathericons/cast.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/check-circle.svg (renamed from src/qcam/assets/feathericons/check-circle.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/check-square.svg (renamed from src/qcam/assets/feathericons/check-square.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/check.svg (renamed from src/qcam/assets/feathericons/check.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/chevron-down.svg (renamed from src/qcam/assets/feathericons/chevron-down.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/chevron-left.svg (renamed from src/qcam/assets/feathericons/chevron-left.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/chevron-right.svg (renamed from src/qcam/assets/feathericons/chevron-right.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/chevron-up.svg (renamed from src/qcam/assets/feathericons/chevron-up.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/chevrons-down.svg (renamed from src/qcam/assets/feathericons/chevrons-down.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/chevrons-left.svg (renamed from src/qcam/assets/feathericons/chevrons-left.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/chevrons-right.svg (renamed from src/qcam/assets/feathericons/chevrons-right.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/chevrons-up.svg (renamed from src/qcam/assets/feathericons/chevrons-up.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/chrome.svg (renamed from src/qcam/assets/feathericons/chrome.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/circle.svg (renamed from src/qcam/assets/feathericons/circle.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/clipboard.svg (renamed from src/qcam/assets/feathericons/clipboard.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/clock.svg (renamed from src/qcam/assets/feathericons/clock.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/cloud-drizzle.svg (renamed from src/qcam/assets/feathericons/cloud-drizzle.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/cloud-lightning.svg (renamed from src/qcam/assets/feathericons/cloud-lightning.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/cloud-off.svg (renamed from src/qcam/assets/feathericons/cloud-off.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/cloud-rain.svg (renamed from src/qcam/assets/feathericons/cloud-rain.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/cloud-snow.svg (renamed from src/qcam/assets/feathericons/cloud-snow.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/cloud.svg (renamed from src/qcam/assets/feathericons/cloud.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/code.svg (renamed from src/qcam/assets/feathericons/code.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/codepen.svg (renamed from src/qcam/assets/feathericons/codepen.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/codesandbox.svg (renamed from src/qcam/assets/feathericons/codesandbox.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/coffee.svg (renamed from src/qcam/assets/feathericons/coffee.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/columns.svg (renamed from src/qcam/assets/feathericons/columns.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/command.svg (renamed from src/qcam/assets/feathericons/command.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/compass.svg (renamed from src/qcam/assets/feathericons/compass.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/copy.svg (renamed from src/qcam/assets/feathericons/copy.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/corner-down-left.svg (renamed from src/qcam/assets/feathericons/corner-down-left.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/corner-down-right.svg (renamed from src/qcam/assets/feathericons/corner-down-right.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/corner-left-down.svg (renamed from src/qcam/assets/feathericons/corner-left-down.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/corner-left-up.svg (renamed from src/qcam/assets/feathericons/corner-left-up.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/corner-right-down.svg (renamed from src/qcam/assets/feathericons/corner-right-down.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/corner-right-up.svg (renamed from src/qcam/assets/feathericons/corner-right-up.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/corner-up-left.svg (renamed from src/qcam/assets/feathericons/corner-up-left.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/corner-up-right.svg (renamed from src/qcam/assets/feathericons/corner-up-right.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/cpu.svg (renamed from src/qcam/assets/feathericons/cpu.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/credit-card.svg (renamed from src/qcam/assets/feathericons/credit-card.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/crop.svg (renamed from src/qcam/assets/feathericons/crop.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/crosshair.svg (renamed from src/qcam/assets/feathericons/crosshair.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/database.svg (renamed from src/qcam/assets/feathericons/database.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/delete.svg (renamed from src/qcam/assets/feathericons/delete.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/disc.svg (renamed from src/qcam/assets/feathericons/disc.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/dollar-sign.svg (renamed from src/qcam/assets/feathericons/dollar-sign.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/download-cloud.svg (renamed from src/qcam/assets/feathericons/download-cloud.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/download.svg (renamed from src/qcam/assets/feathericons/download.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/droplet.svg (renamed from src/qcam/assets/feathericons/droplet.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/edit-2.svg (renamed from src/qcam/assets/feathericons/edit-2.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/edit-3.svg (renamed from src/qcam/assets/feathericons/edit-3.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/edit.svg (renamed from src/qcam/assets/feathericons/edit.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/external-link.svg (renamed from src/qcam/assets/feathericons/external-link.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/eye-off.svg (renamed from src/qcam/assets/feathericons/eye-off.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/eye.svg (renamed from src/qcam/assets/feathericons/eye.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/facebook.svg (renamed from src/qcam/assets/feathericons/facebook.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/fast-forward.svg (renamed from src/qcam/assets/feathericons/fast-forward.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/feather.svg (renamed from src/qcam/assets/feathericons/feather.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/feathericons.qrc (renamed from src/qcam/assets/feathericons/feathericons.qrc)0
-rw-r--r--src/apps/qcam/assets/feathericons/figma.svg (renamed from src/qcam/assets/feathericons/figma.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/file-minus.svg (renamed from src/qcam/assets/feathericons/file-minus.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/file-plus.svg (renamed from src/qcam/assets/feathericons/file-plus.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/file-text.svg (renamed from src/qcam/assets/feathericons/file-text.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/file.svg (renamed from src/qcam/assets/feathericons/file.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/film.svg (renamed from src/qcam/assets/feathericons/film.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/filter.svg (renamed from src/qcam/assets/feathericons/filter.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/flag.svg (renamed from src/qcam/assets/feathericons/flag.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/folder-minus.svg (renamed from src/qcam/assets/feathericons/folder-minus.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/folder-plus.svg (renamed from src/qcam/assets/feathericons/folder-plus.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/folder.svg (renamed from src/qcam/assets/feathericons/folder.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/framer.svg (renamed from src/qcam/assets/feathericons/framer.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/frown.svg (renamed from src/qcam/assets/feathericons/frown.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/gift.svg (renamed from src/qcam/assets/feathericons/gift.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/git-branch.svg (renamed from src/qcam/assets/feathericons/git-branch.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/git-commit.svg (renamed from src/qcam/assets/feathericons/git-commit.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/git-merge.svg (renamed from src/qcam/assets/feathericons/git-merge.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/git-pull-request.svg (renamed from src/qcam/assets/feathericons/git-pull-request.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/github.svg (renamed from src/qcam/assets/feathericons/github.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/gitlab.svg (renamed from src/qcam/assets/feathericons/gitlab.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/globe.svg (renamed from src/qcam/assets/feathericons/globe.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/grid.svg (renamed from src/qcam/assets/feathericons/grid.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/hard-drive.svg (renamed from src/qcam/assets/feathericons/hard-drive.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/hash.svg (renamed from src/qcam/assets/feathericons/hash.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/headphones.svg (renamed from src/qcam/assets/feathericons/headphones.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/heart.svg (renamed from src/qcam/assets/feathericons/heart.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/help-circle.svg (renamed from src/qcam/assets/feathericons/help-circle.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/hexagon.svg (renamed from src/qcam/assets/feathericons/hexagon.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/home.svg (renamed from src/qcam/assets/feathericons/home.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/image.svg (renamed from src/qcam/assets/feathericons/image.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/inbox.svg (renamed from src/qcam/assets/feathericons/inbox.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/info.svg (renamed from src/qcam/assets/feathericons/info.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/instagram.svg (renamed from src/qcam/assets/feathericons/instagram.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/italic.svg (renamed from src/qcam/assets/feathericons/italic.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/key.svg (renamed from src/qcam/assets/feathericons/key.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/layers.svg (renamed from src/qcam/assets/feathericons/layers.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/layout.svg (renamed from src/qcam/assets/feathericons/layout.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/life-buoy.svg (renamed from src/qcam/assets/feathericons/life-buoy.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/link-2.svg (renamed from src/qcam/assets/feathericons/link-2.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/link.svg (renamed from src/qcam/assets/feathericons/link.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/linkedin.svg (renamed from src/qcam/assets/feathericons/linkedin.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/list.svg (renamed from src/qcam/assets/feathericons/list.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/loader.svg (renamed from src/qcam/assets/feathericons/loader.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/lock.svg (renamed from src/qcam/assets/feathericons/lock.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/log-in.svg (renamed from src/qcam/assets/feathericons/log-in.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/log-out.svg (renamed from src/qcam/assets/feathericons/log-out.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/mail.svg (renamed from src/qcam/assets/feathericons/mail.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/map-pin.svg (renamed from src/qcam/assets/feathericons/map-pin.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/map.svg (renamed from src/qcam/assets/feathericons/map.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/maximize-2.svg (renamed from src/qcam/assets/feathericons/maximize-2.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/maximize.svg (renamed from src/qcam/assets/feathericons/maximize.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/meh.svg (renamed from src/qcam/assets/feathericons/meh.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/menu.svg (renamed from src/qcam/assets/feathericons/menu.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/message-circle.svg (renamed from src/qcam/assets/feathericons/message-circle.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/message-square.svg (renamed from src/qcam/assets/feathericons/message-square.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/mic-off.svg (renamed from src/qcam/assets/feathericons/mic-off.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/mic.svg (renamed from src/qcam/assets/feathericons/mic.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/minimize-2.svg (renamed from src/qcam/assets/feathericons/minimize-2.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/minimize.svg (renamed from src/qcam/assets/feathericons/minimize.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/minus-circle.svg (renamed from src/qcam/assets/feathericons/minus-circle.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/minus-square.svg (renamed from src/qcam/assets/feathericons/minus-square.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/minus.svg (renamed from src/qcam/assets/feathericons/minus.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/monitor.svg (renamed from src/qcam/assets/feathericons/monitor.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/moon.svg (renamed from src/qcam/assets/feathericons/moon.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/more-horizontal.svg (renamed from src/qcam/assets/feathericons/more-horizontal.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/more-vertical.svg (renamed from src/qcam/assets/feathericons/more-vertical.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/mouse-pointer.svg (renamed from src/qcam/assets/feathericons/mouse-pointer.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/move.svg (renamed from src/qcam/assets/feathericons/move.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/music.svg (renamed from src/qcam/assets/feathericons/music.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/navigation-2.svg (renamed from src/qcam/assets/feathericons/navigation-2.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/navigation.svg (renamed from src/qcam/assets/feathericons/navigation.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/octagon.svg (renamed from src/qcam/assets/feathericons/octagon.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/package.svg (renamed from src/qcam/assets/feathericons/package.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/paperclip.svg (renamed from src/qcam/assets/feathericons/paperclip.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/pause-circle.svg (renamed from src/qcam/assets/feathericons/pause-circle.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/pause.svg (renamed from src/qcam/assets/feathericons/pause.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/pen-tool.svg (renamed from src/qcam/assets/feathericons/pen-tool.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/percent.svg (renamed from src/qcam/assets/feathericons/percent.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/phone-call.svg (renamed from src/qcam/assets/feathericons/phone-call.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/phone-forwarded.svg (renamed from src/qcam/assets/feathericons/phone-forwarded.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/phone-incoming.svg (renamed from src/qcam/assets/feathericons/phone-incoming.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/phone-missed.svg (renamed from src/qcam/assets/feathericons/phone-missed.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/phone-off.svg (renamed from src/qcam/assets/feathericons/phone-off.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/phone-outgoing.svg (renamed from src/qcam/assets/feathericons/phone-outgoing.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/phone.svg (renamed from src/qcam/assets/feathericons/phone.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/pie-chart.svg (renamed from src/qcam/assets/feathericons/pie-chart.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/play-circle.svg (renamed from src/qcam/assets/feathericons/play-circle.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/play.svg (renamed from src/qcam/assets/feathericons/play.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/plus-circle.svg (renamed from src/qcam/assets/feathericons/plus-circle.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/plus-square.svg (renamed from src/qcam/assets/feathericons/plus-square.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/plus.svg (renamed from src/qcam/assets/feathericons/plus.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/pocket.svg (renamed from src/qcam/assets/feathericons/pocket.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/power.svg (renamed from src/qcam/assets/feathericons/power.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/printer.svg (renamed from src/qcam/assets/feathericons/printer.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/radio.svg (renamed from src/qcam/assets/feathericons/radio.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/refresh-ccw.svg (renamed from src/qcam/assets/feathericons/refresh-ccw.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/refresh-cw.svg (renamed from src/qcam/assets/feathericons/refresh-cw.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/repeat.svg (renamed from src/qcam/assets/feathericons/repeat.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/rewind.svg (renamed from src/qcam/assets/feathericons/rewind.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/rotate-ccw.svg (renamed from src/qcam/assets/feathericons/rotate-ccw.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/rotate-cw.svg (renamed from src/qcam/assets/feathericons/rotate-cw.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/rss.svg (renamed from src/qcam/assets/feathericons/rss.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/save.svg (renamed from src/qcam/assets/feathericons/save.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/scissors.svg (renamed from src/qcam/assets/feathericons/scissors.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/search.svg (renamed from src/qcam/assets/feathericons/search.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/send.svg (renamed from src/qcam/assets/feathericons/send.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/server.svg (renamed from src/qcam/assets/feathericons/server.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/settings.svg (renamed from src/qcam/assets/feathericons/settings.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/share-2.svg (renamed from src/qcam/assets/feathericons/share-2.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/share.svg (renamed from src/qcam/assets/feathericons/share.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/shield-off.svg (renamed from src/qcam/assets/feathericons/shield-off.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/shield.svg (renamed from src/qcam/assets/feathericons/shield.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/shopping-bag.svg (renamed from src/qcam/assets/feathericons/shopping-bag.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/shopping-cart.svg (renamed from src/qcam/assets/feathericons/shopping-cart.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/shuffle.svg (renamed from src/qcam/assets/feathericons/shuffle.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/sidebar.svg (renamed from src/qcam/assets/feathericons/sidebar.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/skip-back.svg (renamed from src/qcam/assets/feathericons/skip-back.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/skip-forward.svg (renamed from src/qcam/assets/feathericons/skip-forward.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/slack.svg (renamed from src/qcam/assets/feathericons/slack.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/slash.svg (renamed from src/qcam/assets/feathericons/slash.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/sliders.svg (renamed from src/qcam/assets/feathericons/sliders.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/smartphone.svg (renamed from src/qcam/assets/feathericons/smartphone.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/smile.svg (renamed from src/qcam/assets/feathericons/smile.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/speaker.svg (renamed from src/qcam/assets/feathericons/speaker.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/square.svg (renamed from src/qcam/assets/feathericons/square.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/star.svg (renamed from src/qcam/assets/feathericons/star.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/stop-circle.svg (renamed from src/qcam/assets/feathericons/stop-circle.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/sun.svg (renamed from src/qcam/assets/feathericons/sun.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/sunrise.svg (renamed from src/qcam/assets/feathericons/sunrise.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/sunset.svg (renamed from src/qcam/assets/feathericons/sunset.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/tablet.svg (renamed from src/qcam/assets/feathericons/tablet.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/tag.svg (renamed from src/qcam/assets/feathericons/tag.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/target.svg (renamed from src/qcam/assets/feathericons/target.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/terminal.svg (renamed from src/qcam/assets/feathericons/terminal.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/thermometer.svg (renamed from src/qcam/assets/feathericons/thermometer.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/thumbs-down.svg (renamed from src/qcam/assets/feathericons/thumbs-down.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/thumbs-up.svg (renamed from src/qcam/assets/feathericons/thumbs-up.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/toggle-left.svg (renamed from src/qcam/assets/feathericons/toggle-left.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/toggle-right.svg (renamed from src/qcam/assets/feathericons/toggle-right.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/tool.svg (renamed from src/qcam/assets/feathericons/tool.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/trash-2.svg (renamed from src/qcam/assets/feathericons/trash-2.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/trash.svg (renamed from src/qcam/assets/feathericons/trash.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/trello.svg (renamed from src/qcam/assets/feathericons/trello.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/trending-down.svg (renamed from src/qcam/assets/feathericons/trending-down.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/trending-up.svg (renamed from src/qcam/assets/feathericons/trending-up.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/triangle.svg (renamed from src/qcam/assets/feathericons/triangle.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/truck.svg (renamed from src/qcam/assets/feathericons/truck.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/tv.svg (renamed from src/qcam/assets/feathericons/tv.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/twitch.svg (renamed from src/qcam/assets/feathericons/twitch.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/twitter.svg (renamed from src/qcam/assets/feathericons/twitter.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/type.svg (renamed from src/qcam/assets/feathericons/type.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/umbrella.svg (renamed from src/qcam/assets/feathericons/umbrella.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/underline.svg (renamed from src/qcam/assets/feathericons/underline.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/unlock.svg (renamed from src/qcam/assets/feathericons/unlock.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/upload-cloud.svg (renamed from src/qcam/assets/feathericons/upload-cloud.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/upload.svg (renamed from src/qcam/assets/feathericons/upload.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/user-check.svg (renamed from src/qcam/assets/feathericons/user-check.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/user-minus.svg (renamed from src/qcam/assets/feathericons/user-minus.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/user-plus.svg (renamed from src/qcam/assets/feathericons/user-plus.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/user-x.svg (renamed from src/qcam/assets/feathericons/user-x.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/user.svg (renamed from src/qcam/assets/feathericons/user.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/users.svg (renamed from src/qcam/assets/feathericons/users.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/video-off.svg (renamed from src/qcam/assets/feathericons/video-off.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/video.svg (renamed from src/qcam/assets/feathericons/video.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/voicemail.svg (renamed from src/qcam/assets/feathericons/voicemail.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/volume-1.svg (renamed from src/qcam/assets/feathericons/volume-1.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/volume-2.svg (renamed from src/qcam/assets/feathericons/volume-2.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/volume-x.svg (renamed from src/qcam/assets/feathericons/volume-x.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/volume.svg (renamed from src/qcam/assets/feathericons/volume.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/watch.svg (renamed from src/qcam/assets/feathericons/watch.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/wifi-off.svg (renamed from src/qcam/assets/feathericons/wifi-off.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/wifi.svg (renamed from src/qcam/assets/feathericons/wifi.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/wind.svg (renamed from src/qcam/assets/feathericons/wind.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/x-circle.svg (renamed from src/qcam/assets/feathericons/x-circle.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/x-octagon.svg (renamed from src/qcam/assets/feathericons/x-octagon.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/x-square.svg (renamed from src/qcam/assets/feathericons/x-square.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/x.svg (renamed from src/qcam/assets/feathericons/x.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/youtube.svg (renamed from src/qcam/assets/feathericons/youtube.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/zap-off.svg (renamed from src/qcam/assets/feathericons/zap-off.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/zap.svg (renamed from src/qcam/assets/feathericons/zap.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/zoom-in.svg (renamed from src/qcam/assets/feathericons/zoom-in.svg)0
-rw-r--r--src/apps/qcam/assets/feathericons/zoom-out.svg (renamed from src/qcam/assets/feathericons/zoom-out.svg)0
-rw-r--r--src/apps/qcam/assets/shader/RGB.frag (renamed from src/qcam/assets/shader/RGB.frag)0
-rw-r--r--src/apps/qcam/assets/shader/YUV_2_planes.frag (renamed from src/qcam/assets/shader/YUV_2_planes.frag)29
-rw-r--r--src/apps/qcam/assets/shader/YUV_3_planes.frag (renamed from src/qcam/assets/shader/YUV_3_planes.frag)27
-rw-r--r--src/apps/qcam/assets/shader/YUV_packed.frag (renamed from src/qcam/assets/shader/YUV_packed.frag)17
-rw-r--r--src/apps/qcam/assets/shader/bayer_1x_packed.frag (renamed from src/qcam/assets/shader/bayer_1x_packed.frag)0
-rw-r--r--src/apps/qcam/assets/shader/bayer_8.frag (renamed from src/qcam/assets/shader/bayer_8.frag)3
-rw-r--r--src/apps/qcam/assets/shader/bayer_8.vert (renamed from src/qcam/assets/shader/bayer_8.vert)0
-rw-r--r--src/apps/qcam/assets/shader/identity.vert (renamed from src/qcam/assets/shader/identity.vert)0
-rw-r--r--src/apps/qcam/assets/shader/shaders.qrc (renamed from src/qcam/assets/shader/shaders.qrc)0
-rw-r--r--src/apps/qcam/cam_select_dialog.cpp111
-rw-r--r--src/apps/qcam/cam_select_dialog.h47
-rw-r--r--src/apps/qcam/format_converter.cpp (renamed from src/qcam/format_converter.cpp)8
-rw-r--r--src/apps/qcam/format_converter.h (renamed from src/qcam/format_converter.h)2
-rw-r--r--src/apps/qcam/main.cpp (renamed from src/qcam/main.cpp)7
-rw-r--r--src/apps/qcam/main_window.cpp (renamed from src/qcam/main_window.cpp)100
-rw-r--r--src/apps/qcam/main_window.h (renamed from src/qcam/main_window.h)31
-rw-r--r--src/apps/qcam/meson.build (renamed from src/qcam/meson.build)35
-rw-r--r--src/apps/qcam/message_handler.cpp (renamed from src/qcam/message_handler.cpp)2
-rw-r--r--src/apps/qcam/message_handler.h (renamed from src/qcam/message_handler.h)2
-rw-r--r--src/apps/qcam/viewfinder.h (renamed from src/qcam/viewfinder.h)4
-rw-r--r--src/apps/qcam/viewfinder_gl.cpp (renamed from src/qcam/viewfinder_gl.cpp)88
-rw-r--r--src/apps/qcam/viewfinder_gl.h (renamed from src/qcam/viewfinder_gl.h)6
-rw-r--r--src/apps/qcam/viewfinder_qt.cpp (renamed from src/qcam/viewfinder_qt.cpp)20
-rw-r--r--src/apps/qcam/viewfinder_qt.h (renamed from src/qcam/viewfinder_qt.h)3
-rw-r--r--src/cam/capture_script.cpp336
-rw-r--r--src/cam/sdl_texture_mjpg.cpp25
-rw-r--r--src/cam/sdl_texture_yuyv.cpp20
-rw-r--r--src/cam/sdl_texture_yuyv.h17
-rw-r--r--src/gstreamer/gstlibcamera-utils.cpp363
-rw-r--r--src/gstreamer/gstlibcamera-utils.h20
-rw-r--r--src/gstreamer/gstlibcamera.cpp2
-rw-r--r--src/gstreamer/gstlibcameraallocator.cpp2
-rw-r--r--src/gstreamer/gstlibcameraallocator.h2
-rw-r--r--src/gstreamer/gstlibcamerapad.cpp57
-rw-r--r--src/gstreamer/gstlibcamerapad.h8
-rw-r--r--src/gstreamer/gstlibcamerapool.cpp9
-rw-r--r--src/gstreamer/gstlibcamerapool.h4
-rw-r--r--src/gstreamer/gstlibcameraprovider.cpp22
-rw-r--r--src/gstreamer/gstlibcameraprovider.h2
-rw-r--r--src/gstreamer/gstlibcamerasrc.cpp602
-rw-r--r--src/gstreamer/gstlibcamerasrc.h33
-rw-r--r--src/gstreamer/meson.build14
-rwxr-xr-xsrc/ipa/ipa-sign-install.sh2
-rwxr-xr-xsrc/ipa/ipa-sign.sh2
-rw-r--r--src/ipa/ipu3/algorithms/af.cpp80
-rw-r--r--src/ipa/ipu3/algorithms/af.h14
-rw-r--r--src/ipa/ipu3/algorithms/agc.cpp321
-rw-r--r--src/ipa/ipu3/algorithms/agc.h35
-rw-r--r--src/ipa/ipu3/algorithms/algorithm.h2
-rw-r--r--src/ipa/ipu3/algorithms/awb.cpp179
-rw-r--r--src/ipa/ipu3/algorithms/awb.h12
-rw-r--r--src/ipa/ipu3/algorithms/blc.cpp14
-rw-r--r--src/ipa/ipu3/algorithms/blc.h6
-rw-r--r--src/ipa/ipu3/algorithms/tone_mapping.cpp26
-rw-r--r--src/ipa/ipu3/algorithms/tone_mapping.h11
-rw-r--r--src/ipa/ipu3/data/meson.build9
-rw-r--r--src/ipa/ipu3/data/uncalibrated.yaml11
-rw-r--r--src/ipa/ipu3/ipa_context.cpp54
-rw-r--r--src/ipa/ipu3/ipa_context.h24
-rw-r--r--src/ipa/ipu3/ipu3-ipa-design-guide.rst2
-rw-r--r--src/ipa/ipu3/ipu3.cpp170
-rw-r--r--src/ipa/ipu3/meson.build3
-rw-r--r--src/ipa/ipu3/module.h2
-rw-r--r--src/ipa/libipa/agc_mean_luminance.cpp577
-rw-r--r--src/ipa/libipa/agc_mean_luminance.h96
-rw-r--r--src/ipa/libipa/algorithm.cpp29
-rw-r--r--src/ipa/libipa/algorithm.h20
-rw-r--r--src/ipa/libipa/camera_sensor_helper.cpp217
-rw-r--r--src/ipa/libipa/camera_sensor_helper.h51
-rw-r--r--src/ipa/libipa/exposure_mode_helper.cpp246
-rw-r--r--src/ipa/libipa/exposure_mode_helper.h53
-rw-r--r--src/ipa/libipa/fc_queue.cpp140
-rw-r--r--src/ipa/libipa/fc_queue.h118
-rw-r--r--src/ipa/libipa/histogram.cpp32
-rw-r--r--src/ipa/libipa/histogram.h19
-rw-r--r--src/ipa/libipa/meson.build6
-rw-r--r--src/ipa/libipa/module.cpp4
-rw-r--r--src/ipa/libipa/module.h2
-rw-r--r--src/ipa/meson.build46
-rw-r--r--src/ipa/raspberrypi/cam_helper.cpp219
-rw-r--r--src/ipa/raspberrypi/cam_helper.hpp123
-rw-r--r--src/ipa/raspberrypi/cam_helper_imx290.cpp67
-rw-r--r--src/ipa/raspberrypi/cam_helper_imx296.cpp69
-rw-r--r--src/ipa/raspberrypi/cam_helper_ov9281.cpp65
-rw-r--r--src/ipa/raspberrypi/controller/agc_algorithm.hpp32
-rw-r--r--src/ipa/raspberrypi/controller/agc_status.h41
-rw-r--r--src/ipa/raspberrypi/controller/algorithm.cpp44
-rw-r--r--src/ipa/raspberrypi/controller/algorithm.hpp60
-rw-r--r--src/ipa/raspberrypi/controller/alsc_status.h27
-rw-r--r--src/ipa/raspberrypi/controller/awb_algorithm.hpp23
-rw-r--r--src/ipa/raspberrypi/controller/awb_status.h26
-rw-r--r--src/ipa/raspberrypi/controller/black_level_status.h23
-rw-r--r--src/ipa/raspberrypi/controller/camera_mode.h50
-rw-r--r--src/ipa/raspberrypi/controller/ccm_algorithm.hpp21
-rw-r--r--src/ipa/raspberrypi/controller/ccm_status.h22
-rw-r--r--src/ipa/raspberrypi/controller/contrast_algorithm.hpp22
-rw-r--r--src/ipa/raspberrypi/controller/contrast_status.h31
-rw-r--r--src/ipa/raspberrypi/controller/controller.cpp104
-rw-r--r--src/ipa/raspberrypi/controller/controller.hpp54
-rw-r--r--src/ipa/raspberrypi/controller/denoise_algorithm.hpp23
-rw-r--r--src/ipa/raspberrypi/controller/denoise_status.h24
-rw-r--r--src/ipa/raspberrypi/controller/device_status.cpp30
-rw-r--r--src/ipa/raspberrypi/controller/dpc_status.h21
-rw-r--r--src/ipa/raspberrypi/controller/focus_status.h26
-rw-r--r--src/ipa/raspberrypi/controller/geq_status.h22
-rw-r--r--src/ipa/raspberrypi/controller/histogram.cpp64
-rw-r--r--src/ipa/raspberrypi/controller/histogram.hpp44
-rw-r--r--src/ipa/raspberrypi/controller/lux_status.h29
-rw-r--r--src/ipa/raspberrypi/controller/metadata.hpp110
-rw-r--r--src/ipa/raspberrypi/controller/noise_status.h22
-rw-r--r--src/ipa/raspberrypi/controller/pwl.cpp246
-rw-r--r--src/ipa/raspberrypi/controller/pwl.hpp112
-rw-r--r--src/ipa/raspberrypi/controller/rpi/agc.cpp797
-rw-r--r--src/ipa/raspberrypi/controller/rpi/agc.hpp139
-rw-r--r--src/ipa/raspberrypi/controller/rpi/alsc.cpp787
-rw-r--r--src/ipa/raspberrypi/controller/rpi/alsc.hpp106
-rw-r--r--src/ipa/raspberrypi/controller/rpi/awb.cpp667
-rw-r--r--src/ipa/raspberrypi/controller/rpi/awb.hpp179
-rw-r--r--src/ipa/raspberrypi/controller/rpi/black_level.cpp63
-rw-r--r--src/ipa/raspberrypi/controller/rpi/black_level.hpp30
-rw-r--r--src/ipa/raspberrypi/controller/rpi/ccm.cpp169
-rw-r--r--src/ipa/raspberrypi/controller/rpi/contrast.cpp185
-rw-r--r--src/ipa/raspberrypi/controller/rpi/contrast.hpp50
-rw-r--r--src/ipa/raspberrypi/controller/rpi/dpc.cpp53
-rw-r--r--src/ipa/raspberrypi/controller/rpi/dpc.hpp32
-rw-r--r--src/ipa/raspberrypi/controller/rpi/focus.cpp50
-rw-r--r--src/ipa/raspberrypi/controller/rpi/geq.cpp81
-rw-r--r--src/ipa/raspberrypi/controller/rpi/geq.hpp34
-rw-r--r--src/ipa/raspberrypi/controller/rpi/lux.cpp104
-rw-r--r--src/ipa/raspberrypi/controller/rpi/lux.hpp43
-rw-r--r--src/ipa/raspberrypi/controller/rpi/noise.cpp76
-rw-r--r--src/ipa/raspberrypi/controller/rpi/noise.hpp32
-rw-r--r--src/ipa/raspberrypi/controller/rpi/sdn.cpp75
-rw-r--r--src/ipa/raspberrypi/controller/rpi/sdn.hpp32
-rw-r--r--src/ipa/raspberrypi/controller/rpi/sharpen.cpp85
-rw-r--r--src/ipa/raspberrypi/controller/rpi/sharpen.hpp34
-rw-r--r--src/ipa/raspberrypi/controller/sharpen_algorithm.hpp21
-rw-r--r--src/ipa/raspberrypi/controller/sharpen_status.h28
-rw-r--r--src/ipa/raspberrypi/data/imx219.json412
-rw-r--r--src/ipa/raspberrypi/data/imx219_noir.json344
-rw-r--r--src/ipa/raspberrypi/data/imx290.json165
-rw-r--r--src/ipa/raspberrypi/data/imx296.json191
-rw-r--r--src/ipa/raspberrypi/data/imx378.json338
-rw-r--r--src/ipa/raspberrypi/data/imx477.json430
-rw-r--r--src/ipa/raspberrypi/data/imx477_noir.json362
-rw-r--r--src/ipa/raspberrypi/data/imx519.json338
-rw-r--r--src/ipa/raspberrypi/data/ov5647.json409
-rw-r--r--src/ipa/raspberrypi/data/ov5647_noir.json341
-rw-r--r--src/ipa/raspberrypi/data/ov9281.json92
-rw-r--r--src/ipa/raspberrypi/data/se327m12.json341
-rw-r--r--src/ipa/raspberrypi/data/uncalibrated.json82
-rw-r--r--src/ipa/raspberrypi/md_parser_smia.cpp149
-rw-r--r--src/ipa/raspberrypi/meson.build66
-rw-r--r--src/ipa/raspberrypi/raspberrypi.cpp1460
-rw-r--r--src/ipa/rkisp1/algorithms/agc.cpp386
-rw-r--r--src/ipa/rkisp1/algorithms/agc.h39
-rw-r--r--src/ipa/rkisp1/algorithms/algorithm.h14
-rw-r--r--src/ipa/rkisp1/algorithms/awb.cpp321
-rw-r--r--src/ipa/rkisp1/algorithms/awb.h21
-rw-r--r--src/ipa/rkisp1/algorithms/blc.cpp57
-rw-r--r--src/ipa/rkisp1/algorithms/blc.h20
-rw-r--r--src/ipa/rkisp1/algorithms/cproc.cpp111
-rw-r--r--src/ipa/rkisp1/algorithms/cproc.h33
-rw-r--r--src/ipa/rkisp1/algorithms/dpcc.cpp251
-rw-r--r--src/ipa/rkisp1/algorithms/dpcc.h32
-rw-r--r--src/ipa/rkisp1/algorithms/dpf.cpp260
-rw-r--r--src/ipa/rkisp1/algorithms/dpf.h38
-rw-r--r--src/ipa/rkisp1/algorithms/filter.cpp216
-rw-r--r--src/ipa/rkisp1/algorithms/filter.h33
-rw-r--r--src/ipa/rkisp1/algorithms/gsl.cpp146
-rw-r--r--src/ipa/rkisp1/algorithms/gsl.h35
-rw-r--r--src/ipa/rkisp1/algorithms/lsc.cpp342
-rw-r--r--src/ipa/rkisp1/algorithms/lsc.h59
-rw-r--r--src/ipa/rkisp1/algorithms/meson.build6
-rw-r--r--src/ipa/rkisp1/data/imx219.yaml107
-rw-r--r--src/ipa/rkisp1/data/imx258.yaml54
-rw-r--r--src/ipa/rkisp1/data/meson.build4
-rw-r--r--src/ipa/rkisp1/data/ov2685.yaml41
-rw-r--r--src/ipa/rkisp1/data/ov4689.yaml13
-rw-r--r--src/ipa/rkisp1/data/ov5640.yaml243
-rw-r--r--src/ipa/rkisp1/data/ov5695.yaml41
-rw-r--r--src/ipa/rkisp1/data/ov8858.yaml54
-rw-r--r--src/ipa/rkisp1/data/uncalibrated.yaml2
-rw-r--r--src/ipa/rkisp1/ipa_context.cpp316
-rw-r--r--src/ipa/rkisp1/ipa_context.h113
-rw-r--r--src/ipa/rkisp1/meson.build2
-rw-r--r--src/ipa/rkisp1/module.h2
-rw-r--r--src/ipa/rkisp1/rkisp1.cpp316
-rw-r--r--src/ipa/rpi/README.md (renamed from src/ipa/raspberrypi/README.md)0
-rw-r--r--src/ipa/rpi/cam_helper/cam_helper.cpp265
-rw-r--r--src/ipa/rpi/cam_helper/cam_helper.h132
-rw-r--r--src/ipa/rpi/cam_helper/cam_helper_imx219.cpp (renamed from src/ipa/raspberrypi/cam_helper_imx219.cpp)50
-rw-r--r--src/ipa/rpi/cam_helper/cam_helper_imx290.cpp75
-rw-r--r--src/ipa/rpi/cam_helper/cam_helper_imx296.cpp83
-rw-r--r--src/ipa/rpi/cam_helper/cam_helper_imx477.cpp (renamed from src/ipa/raspberrypi/cam_helper_imx477.cpp)103
-rw-r--r--src/ipa/rpi/cam_helper/cam_helper_imx519.cpp (renamed from src/ipa/raspberrypi/cam_helper_imx519.cpp)101
-rw-r--r--src/ipa/rpi/cam_helper/cam_helper_imx708.cpp382
-rw-r--r--src/ipa/rpi/cam_helper/cam_helper_ov5647.cpp (renamed from src/ipa/raspberrypi/cam_helper_ov5647.cpp)51
-rw-r--r--src/ipa/rpi/cam_helper/cam_helper_ov64a40.cpp74
-rw-r--r--src/ipa/rpi/cam_helper/cam_helper_ov9281.cpp66
-rw-r--r--src/ipa/rpi/cam_helper/md_parser.h (renamed from src/ipa/raspberrypi/md_parser.hpp)50
-rw-r--r--src/ipa/rpi/cam_helper/md_parser_smia.cpp152
-rw-r--r--src/ipa/rpi/cam_helper/meson.build27
-rw-r--r--src/ipa/rpi/common/ipa_base.cpp1514
-rw-r--r--src/ipa/rpi/common/ipa_base.h143
-rw-r--r--src/ipa/rpi/common/meson.build17
-rw-r--r--src/ipa/rpi/controller/af_algorithm.h76
-rw-r--r--src/ipa/rpi/controller/af_status.h35
-rw-r--r--src/ipa/rpi/controller/agc_algorithm.h38
-rw-r--r--src/ipa/rpi/controller/agc_status.h48
-rw-r--r--src/ipa/rpi/controller/algorithm.cpp56
-rw-r--r--src/ipa/rpi/controller/algorithm.h68
-rw-r--r--src/ipa/rpi/controller/alsc_status.h22
-rw-r--r--src/ipa/rpi/controller/awb_algorithm.h26
-rw-r--r--src/ipa/rpi/controller/awb_status.h20
-rw-r--r--src/ipa/rpi/controller/black_level_algorithm.h23
-rw-r--r--src/ipa/rpi/controller/black_level_status.h15
-rw-r--r--src/ipa/rpi/controller/cac_status.h16
-rw-r--r--src/ipa/rpi/controller/camera_mode.h59
-rw-r--r--src/ipa/rpi/controller/ccm_algorithm.h21
-rw-r--r--src/ipa/rpi/controller/ccm_status.h14
-rw-r--r--src/ipa/rpi/controller/contrast_algorithm.h24
-rw-r--r--src/ipa/rpi/controller/contrast_status.h20
-rw-r--r--src/ipa/rpi/controller/controller.cpp220
-rw-r--r--src/ipa/rpi/controller/controller.h77
-rw-r--r--src/ipa/rpi/controller/denoise_algorithm.h27
-rw-r--r--src/ipa/rpi/controller/denoise_status.h35
-rw-r--r--src/ipa/rpi/controller/device_status.cpp31
-rw-r--r--src/ipa/rpi/controller/device_status.h (renamed from src/ipa/raspberrypi/controller/device_status.h)26
-rw-r--r--src/ipa/rpi/controller/dpc_status.h13
-rw-r--r--src/ipa/rpi/controller/geq_status.h14
-rw-r--r--src/ipa/rpi/controller/hdr_algorithm.h25
-rw-r--r--src/ipa/rpi/controller/hdr_status.h19
-rw-r--r--src/ipa/rpi/controller/histogram.cpp76
-rw-r--r--src/ipa/rpi/controller/histogram.h55
-rw-r--r--src/ipa/rpi/controller/lux_status.h23
-rw-r--r--src/ipa/rpi/controller/meson.build35
-rw-r--r--src/ipa/rpi/controller/metadata.h127
-rw-r--r--src/ipa/rpi/controller/noise_status.h14
-rw-r--r--src/ipa/rpi/controller/pdaf_data.h24
-rw-r--r--src/ipa/rpi/controller/pwl.cpp269
-rw-r--r--src/ipa/rpi/controller/pwl.h127
-rw-r--r--src/ipa/rpi/controller/region_stats.h123
-rw-r--r--src/ipa/rpi/controller/rpi/af.cpp797
-rw-r--r--src/ipa/rpi/controller/rpi/af.h165
-rw-r--r--src/ipa/rpi/controller/rpi/agc.cpp338
-rw-r--r--src/ipa/rpi/controller/rpi/agc.h58
-rw-r--r--src/ipa/rpi/controller/rpi/agc_channel.cpp1022
-rw-r--r--src/ipa/rpi/controller/rpi/agc_channel.h153
-rw-r--r--src/ipa/rpi/controller/rpi/alsc.cpp867
-rw-r--r--src/ipa/rpi/controller/rpi/alsc.h174
-rw-r--r--src/ipa/rpi/controller/rpi/awb.cpp751
-rw-r--r--src/ipa/rpi/controller/rpi/awb.h192
-rw-r--r--src/ipa/rpi/controller/rpi/black_level.cpp74
-rw-r--r--src/ipa/rpi/controller/rpi/black_level.h32
-rw-r--r--src/ipa/rpi/controller/rpi/cac.cpp107
-rw-r--r--src/ipa/rpi/controller/rpi/cac.h35
-rw-r--r--src/ipa/rpi/controller/rpi/ccm.cpp199
-rw-r--r--src/ipa/rpi/controller/rpi/ccm.h (renamed from src/ipa/raspberrypi/controller/rpi/ccm.hpp)24
-rw-r--r--src/ipa/rpi/controller/rpi/contrast.cpp192
-rw-r--r--src/ipa/rpi/controller/rpi/contrast.h54
-rw-r--r--src/ipa/rpi/controller/rpi/denoise.cpp198
-rw-r--r--src/ipa/rpi/controller/rpi/denoise.h59
-rw-r--r--src/ipa/rpi/controller/rpi/dpc.cpp59
-rw-r--r--src/ipa/rpi/controller/rpi/dpc.h32
-rw-r--r--src/ipa/rpi/controller/rpi/focus.h (renamed from src/ipa/raspberrypi/controller/rpi/focus.hpp)12
-rw-r--r--src/ipa/rpi/controller/rpi/geq.cpp89
-rw-r--r--src/ipa/rpi/controller/rpi/geq.h34
-rw-r--r--src/ipa/rpi/controller/rpi/hdr.cpp417
-rw-r--r--src/ipa/rpi/controller/rpi/hdr.h84
-rw-r--r--src/ipa/rpi/controller/rpi/lux.cpp115
-rw-r--r--src/ipa/rpi/controller/rpi/lux.h45
-rw-r--r--src/ipa/rpi/controller/rpi/noise.cpp89
-rw-r--r--src/ipa/rpi/controller/rpi/noise.h32
-rw-r--r--src/ipa/rpi/controller/rpi/saturation.cpp57
-rw-r--r--src/ipa/rpi/controller/rpi/saturation.h32
-rw-r--r--src/ipa/rpi/controller/rpi/sdn.cpp83
-rw-r--r--src/ipa/rpi/controller/rpi/sdn.h32
-rw-r--r--src/ipa/rpi/controller/rpi/sharpen.cpp92
-rw-r--r--src/ipa/rpi/controller/rpi/sharpen.h34
-rw-r--r--src/ipa/rpi/controller/rpi/tonemap.cpp61
-rw-r--r--src/ipa/rpi/controller/rpi/tonemap.h35
-rw-r--r--src/ipa/rpi/controller/saturation_status.h13
-rw-r--r--src/ipa/rpi/controller/sharpen_algorithm.h21
-rw-r--r--src/ipa/rpi/controller/sharpen_status.h20
-rw-r--r--src/ipa/rpi/controller/statistics.h78
-rw-r--r--src/ipa/rpi/controller/stitch_status.h17
-rw-r--r--src/ipa/rpi/controller/tonemap_status.h17
-rw-r--r--src/ipa/rpi/meson.build14
-rw-r--r--src/ipa/rpi/vc4/data/imx219.json695
-rw-r--r--src/ipa/rpi/vc4/data/imx219_noir.json629
-rw-r--r--src/ipa/rpi/vc4/data/imx290.json214
-rw-r--r--src/ipa/rpi/vc4/data/imx296.json443
-rw-r--r--src/ipa/rpi/vc4/data/imx296_mono.json240
-rw-r--r--src/ipa/rpi/vc4/data/imx378.json427
-rw-r--r--src/ipa/rpi/vc4/data/imx477.json700
-rw-r--r--src/ipa/rpi/vc4/data/imx477_noir.json656
-rw-r--r--src/ipa/rpi/vc4/data/imx477_scientific.json488
-rw-r--r--src/ipa/rpi/vc4/data/imx477_v1.json525
-rw-r--r--src/ipa/rpi/vc4/data/imx519.json427
-rw-r--r--src/ipa/rpi/vc4/data/imx708.json671
-rw-r--r--src/ipa/rpi/vc4/data/imx708_noir.json770
-rw-r--r--src/ipa/rpi/vc4/data/imx708_wide.json682
-rw-r--r--src/ipa/rpi/vc4/data/imx708_wide_noir.json673
-rw-r--r--src/ipa/rpi/vc4/data/meson.build (renamed from src/ipa/raspberrypi/data/meson.build)12
-rw-r--r--src/ipa/rpi/vc4/data/ov5647.json696
-rw-r--r--src/ipa/rpi/vc4/data/ov5647_noir.json412
-rw-r--r--src/ipa/rpi/vc4/data/ov64a40.json422
-rw-r--r--src/ipa/rpi/vc4/data/ov9281_mono.json136
-rw-r--r--src/ipa/rpi/vc4/data/se327m12.json432
-rw-r--r--src/ipa/rpi/vc4/data/uncalibrated.json131
-rw-r--r--src/ipa/rpi/vc4/meson.build48
-rw-r--r--src/ipa/rpi/vc4/vc4.cpp597
-rw-r--r--src/ipa/simple/black_level.cpp88
-rw-r--r--src/ipa/simple/black_level.h28
-rw-r--r--src/ipa/simple/data/meson.build10
-rw-r--r--src/ipa/simple/data/uncalibrated.yaml5
-rw-r--r--src/ipa/simple/meson.build30
-rw-r--r--src/ipa/simple/soft_simple.cpp403
-rw-r--r--src/ipa/vimc/data/meson.build3
-rw-r--r--src/ipa/vimc/meson.build2
-rw-r--r--src/ipa/vimc/vimc.cpp28
-rw-r--r--src/libcamera/base/backtrace.cpp14
-rw-r--r--src/libcamera/base/bound_method.cpp3
-rw-r--r--src/libcamera/base/class.cpp2
-rw-r--r--src/libcamera/base/event_dispatcher.cpp2
-rw-r--r--src/libcamera/base/event_dispatcher_poll.cpp2
-rw-r--r--src/libcamera/base/event_notifier.cpp8
-rw-r--r--src/libcamera/base/file.cpp7
-rw-r--r--src/libcamera/base/flags.cpp2
-rw-r--r--src/libcamera/base/log.cpp56
-rw-r--r--src/libcamera/base/meson.build7
-rw-r--r--src/libcamera/base/message.cpp2
-rw-r--r--src/libcamera/base/mutex.cpp2
-rw-r--r--src/libcamera/base/object.cpp57
-rw-r--r--src/libcamera/base/semaphore.cpp6
-rw-r--r--src/libcamera/base/shared_fd.cpp2
-rw-r--r--src/libcamera/base/signal.cpp5
-rw-r--r--src/libcamera/base/thread.cpp19
-rw-r--r--src/libcamera/base/timer.cpp12
-rw-r--r--src/libcamera/base/unique_fd.cpp2
-rw-r--r--src/libcamera/base/utils.cpp70
-rw-r--r--src/libcamera/bayer_format.cpp64
-rw-r--r--src/libcamera/byte_stream_buffer.cpp2
-rw-r--r--src/libcamera/camera.cpp261
-rw-r--r--src/libcamera/camera_controls.cpp2
-rw-r--r--src/libcamera/camera_lens.cpp2
-rw-r--r--src/libcamera/camera_manager.cpp262
-rw-r--r--src/libcamera/color_space.cpp431
-rw-r--r--src/libcamera/control_ids.cpp.in14
-rw-r--r--src/libcamera/control_ids_core.yaml (renamed from src/libcamera/control_ids.yaml)408
-rw-r--r--src/libcamera/control_ids_draft.yaml230
-rw-r--r--src/libcamera/control_ids_rpi.yaml29
-rw-r--r--src/libcamera/control_ranges.yaml18
-rw-r--r--src/libcamera/control_serializer.cpp30
-rw-r--r--src/libcamera/control_validator.cpp2
-rw-r--r--src/libcamera/controls.cpp70
-rw-r--r--src/libcamera/converter.cpp335
-rw-r--r--src/libcamera/converter/converter_v4l2_m2m.cpp (renamed from src/libcamera/pipeline/simple/converter.cpp)169
-rw-r--r--src/libcamera/converter/meson.build5
-rw-r--r--src/libcamera/delayed_controls.cpp16
-rw-r--r--src/libcamera/device_enumerator.cpp16
-rw-r--r--src/libcamera/device_enumerator_sysfs.cpp2
-rw-r--r--src/libcamera/device_enumerator_udev.cpp12
-rw-r--r--src/libcamera/dma_heaps.cpp165
-rw-r--r--src/libcamera/fence.cpp2
-rw-r--r--src/libcamera/formats.cpp527
-rw-r--r--src/libcamera/formats.yaml51
-rw-r--r--src/libcamera/framebuffer.cpp60
-rw-r--r--src/libcamera/framebuffer_allocator.cpp21
-rw-r--r--src/libcamera/geometry.cpp10
-rw-r--r--src/libcamera/ipa/meson.build7
-rw-r--r--src/libcamera/ipa_controls.cpp2
-rw-r--r--src/libcamera/ipa_data_serializer.cpp2
-rw-r--r--src/libcamera/ipa_interface.cpp2
-rw-r--r--src/libcamera/ipa_manager.cpp22
-rw-r--r--src/libcamera/ipa_module.cpp25
-rw-r--r--src/libcamera/ipa_proxy.cpp2
-rw-r--r--src/libcamera/ipa_pub_key.cpp.in2
-rw-r--r--src/libcamera/ipc_pipe.cpp2
-rw-r--r--src/libcamera/ipc_pipe_unixsocket.cpp2
-rw-r--r--src/libcamera/ipc_unixsocket.cpp2
-rw-r--r--src/libcamera/mapped_framebuffer.cpp2
-rw-r--r--src/libcamera/media_device.cpp20
-rw-r--r--src/libcamera/media_object.cpp2
-rw-r--r--src/libcamera/meson.build72
-rw-r--r--src/libcamera/orientation.cpp115
-rw-r--r--src/libcamera/pipeline/imx8-isi/imx8-isi.cpp1117
-rw-r--r--src/libcamera/pipeline/imx8-isi/meson.build5
-rw-r--r--src/libcamera/pipeline/ipu3/cio2.cpp19
-rw-r--r--src/libcamera/pipeline/ipu3/cio2.h6
-rw-r--r--src/libcamera/pipeline/ipu3/frames.cpp2
-rw-r--r--src/libcamera/pipeline/ipu3/frames.h2
-rw-r--r--src/libcamera/pipeline/ipu3/imgu.cpp8
-rw-r--r--src/libcamera/pipeline/ipu3/imgu.h2
-rw-r--r--src/libcamera/pipeline/ipu3/ipu3.cpp159
-rw-r--r--src/libcamera/pipeline/mali-c55/mali-c55.cpp1066
-rw-r--r--src/libcamera/pipeline/mali-c55/meson.build5
-rw-r--r--src/libcamera/pipeline/meson.build15
-rw-r--r--src/libcamera/pipeline/raspberrypi/dma_heaps.cpp90
-rw-r--r--src/libcamera/pipeline/raspberrypi/dma_heaps.h32
-rw-r--r--src/libcamera/pipeline/raspberrypi/raspberrypi.cpp2200
-rw-r--r--src/libcamera/pipeline/raspberrypi/rpi_stream.h178
-rw-r--r--src/libcamera/pipeline/rkisp1/rkisp1.cpp520
-rw-r--r--src/libcamera/pipeline/rkisp1/rkisp1_path.cpp299
-rw-r--r--src/libcamera/pipeline/rkisp1/rkisp1_path.h18
-rw-r--r--src/libcamera/pipeline/rpi/common/delayed_controls.cpp293
-rw-r--r--src/libcamera/pipeline/rpi/common/delayed_controls.h87
-rw-r--r--src/libcamera/pipeline/rpi/common/meson.build (renamed from src/libcamera/pipeline/raspberrypi/meson.build)4
-rw-r--r--src/libcamera/pipeline/rpi/common/pipeline_base.cpp1491
-rw-r--r--src/libcamera/pipeline/rpi/common/pipeline_base.h286
-rw-r--r--src/libcamera/pipeline/rpi/common/rpi_stream.cpp (renamed from src/libcamera/pipeline/raspberrypi/rpi_stream.cpp)159
-rw-r--r--src/libcamera/pipeline/rpi/common/rpi_stream.h199
-rw-r--r--src/libcamera/pipeline/rpi/meson.build12
-rw-r--r--src/libcamera/pipeline/rpi/vc4/data/example.yaml46
-rw-r--r--src/libcamera/pipeline/rpi/vc4/data/meson.build9
-rw-r--r--src/libcamera/pipeline/rpi/vc4/meson.build7
-rw-r--r--src/libcamera/pipeline/rpi/vc4/vc4.cpp1023
-rw-r--r--src/libcamera/pipeline/simple/converter.h98
-rw-r--r--src/libcamera/pipeline/simple/meson.build1
-rw-r--r--src/libcamera/pipeline/simple/simple.cpp465
-rw-r--r--src/libcamera/pipeline/uvcvideo/uvcvideo.cpp237
-rw-r--r--src/libcamera/pipeline/vimc/vimc.cpp49
-rw-r--r--src/libcamera/pipeline_handler.cpp260
-rw-r--r--src/libcamera/pixel_format.cpp2
-rw-r--r--src/libcamera/process.cpp6
-rw-r--r--src/libcamera/property_ids.cpp.in14
-rw-r--r--src/libcamera/property_ids_core.yaml (renamed from src/libcamera/property_ids.yaml)50
-rw-r--r--src/libcamera/property_ids_draft.yaml39
-rw-r--r--src/libcamera/proxy/worker/meson.build2
-rw-r--r--src/libcamera/pub_key.cpp52
-rw-r--r--src/libcamera/request.cpp17
-rw-r--r--src/libcamera/sensor/camera_sensor.cpp (renamed from src/libcamera/camera_sensor.cpp)587
-rw-r--r--src/libcamera/sensor/camera_sensor_properties.cpp (renamed from src/libcamera/camera_sensor_properties.cpp)102
-rw-r--r--src/libcamera/sensor/meson.build6
-rw-r--r--src/libcamera/shared_mem_object.cpp242
-rw-r--r--src/libcamera/software_isp/TODO279
-rw-r--r--src/libcamera/software_isp/debayer.cpp132
-rw-r--r--src/libcamera/software_isp/debayer.h54
-rw-r--r--src/libcamera/software_isp/debayer_cpu.cpp807
-rw-r--r--src/libcamera/software_isp/debayer_cpu.h158
-rw-r--r--src/libcamera/software_isp/meson.build15
-rw-r--r--src/libcamera/software_isp/software_isp.cpp357
-rw-r--r--src/libcamera/software_isp/swstats_cpu.cpp432
-rw-r--r--src/libcamera/software_isp/swstats_cpu.h97
-rw-r--r--src/libcamera/source_paths.cpp2
-rw-r--r--src/libcamera/stream.cpp23
-rw-r--r--src/libcamera/sysfs.cpp2
-rw-r--r--src/libcamera/tracepoints.cpp2
-rw-r--r--src/libcamera/transform.cpp113
-rw-r--r--src/libcamera/v4l2_device.cpp88
-rw-r--r--src/libcamera/v4l2_pixelformat.cpp84
-rw-r--r--src/libcamera/v4l2_subdevice.cpp1312
-rw-r--r--src/libcamera/v4l2_videodevice.cpp116
-rw-r--r--src/libcamera/version.cpp.in2
-rw-r--r--src/libcamera/yaml_parser.cpp358
-rw-r--r--src/meson.build9
-rwxr-xr-xsrc/py/cam/cam.py52
-rw-r--r--src/py/cam/helpers.py6
-rwxr-xr-xsrc/py/examples/simple-cam.py20
-rwxr-xr-xsrc/py/examples/simple-capture.py33
-rwxr-xr-xsrc/py/examples/simple-continuous-capture.py26
-rwxr-xr-xsrc/py/libcamera/gen-py-controls.py90
-rw-r--r--src/py/libcamera/meson.build52
-rw-r--r--src/py/libcamera/py_camera_manager.cpp131
-rw-r--r--src/py/libcamera/py_camera_manager.h45
-rw-r--r--src/py/libcamera/py_color_space.cpp70
-rw-r--r--src/py/libcamera/py_controls_generated.cpp.in8
-rw-r--r--src/py/libcamera/py_enums.cpp12
-rw-r--r--src/py/libcamera/py_formats_generated.cpp.in2
-rw-r--r--src/py/libcamera/py_geometry.cpp2
-rw-r--r--src/py/libcamera/py_helpers.cpp97
-rw-r--r--src/py/libcamera/py_helpers.h13
-rw-r--r--src/py/libcamera/py_main.cpp455
-rw-r--r--src/py/libcamera/py_main.h14
-rw-r--r--src/py/libcamera/py_properties_generated.cpp.in8
-rw-r--r--src/py/libcamera/py_transform.cpp81
-rw-r--r--src/py/meson.build2
-rw-r--r--src/v4l2/meson.build7
-rw-r--r--src/v4l2/v4l2_camera.cpp7
-rw-r--r--src/v4l2/v4l2_camera.h16
-rw-r--r--src/v4l2/v4l2_camera_file.cpp2
-rw-r--r--src/v4l2/v4l2_camera_file.h2
-rw-r--r--src/v4l2/v4l2_camera_proxy.cpp20
-rw-r--r--src/v4l2/v4l2_camera_proxy.h14
-rw-r--r--src/v4l2/v4l2_compat.cpp2
-rw-r--r--src/v4l2/v4l2_compat_manager.cpp34
-rw-r--r--src/v4l2/v4l2_compat_manager.h2
865 files changed, 48075 insertions, 19332 deletions
diff --git a/src/android/camera3_hal.cpp b/src/android/camera3_hal.cpp
index da836bae..a5ad2374 100644
--- a/src/android/camera3_hal.cpp
+++ b/src/android/camera3_hal.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera3_hal.cpp - Android Camera HALv3 module
+ * Android Camera HALv3 module
*/
#include <hardware/camera_common.h>
diff --git a/src/android/camera_buffer.h b/src/android/camera_buffer.h
index b4531c80..96669962 100644
--- a/src/android/camera_buffer.h
+++ b/src/android/camera_buffer.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * camera_buffer.h - Frame buffer handling interface definition
+ * Frame buffer handling interface definition
*/
#pragma once
diff --git a/src/android/camera_capabilities.cpp b/src/android/camera_capabilities.cpp
index 6f197eb8..6f4d48de 100644
--- a/src/android/camera_capabilities.cpp
+++ b/src/android/camera_capabilities.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * camera_capabilities.cpp - Camera static properties manager
+ * Camera static properties manager
*/
#include "camera_capabilities.h"
@@ -31,13 +31,20 @@ namespace {
/*
* \var camera3Resolutions
- * \brief The list of image resolutions defined as mandatory to be supported by
- * the Android Camera3 specification
+ * \brief The list of image resolutions commonly supported by Android
+ *
+ * The following are defined as mandatory to be supported by the Android
+ * Camera3 specification: (320x240), (640x480), (1280x720), (1920x1080).
+ *
+ * The following 4:3 resolutions are defined as optional, but commonly
+ * supported by Android devices: (1280x960), (1600x1200).
*/
const std::vector<Size> camera3Resolutions = {
{ 320, 240 },
{ 640, 480 },
{ 1280, 720 },
+ { 1280, 960 },
+ { 1600, 1200 },
{ 1920, 1080 }
};
@@ -367,14 +374,20 @@ void CameraCapabilities::computeHwLevel(
camera_metadata_enum_android_info_supported_hardware_level
hwLevel = ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_FULL;
- if (!caps.count(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MANUAL_SENSOR))
+ if (!caps.count(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MANUAL_SENSOR)) {
+ LOG(HAL, Info) << noFull << "missing manual sensor";
hwLevel = ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED;
+ }
- if (!caps.count(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MANUAL_POST_PROCESSING))
+ if (!caps.count(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MANUAL_POST_PROCESSING)) {
+ LOG(HAL, Info) << noFull << "missing manual post processing";
hwLevel = ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED;
+ }
- if (!caps.count(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_BURST_CAPTURE))
+ if (!caps.count(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_BURST_CAPTURE)) {
+ LOG(HAL, Info) << noFull << "missing burst capture";
hwLevel = ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED;
+ }
found = staticMetadata_->getEntry(ANDROID_SYNC_MAX_LATENCY, &entry);
if (!found || *entry.data.i32 != 0) {
@@ -475,7 +488,7 @@ int CameraCapabilities::initializeStreamConfigurations()
* \todo Get this from the camera properties once defined
*/
std::unique_ptr<CameraConfiguration> cameraConfig =
- camera_->generateConfiguration({ StillCapture });
+ camera_->generateConfiguration({ StreamRole::StillCapture });
if (!cameraConfig) {
LOG(HAL, Error) << "Failed to get maximum resolution";
return -EINVAL;
@@ -492,8 +505,8 @@ int CameraCapabilities::initializeStreamConfigurations()
/*
* Build the list of supported image resolutions.
*
- * The resolutions listed in camera3Resolution are mandatory to be
- * supported, up to the camera maximum resolution.
+ * The resolutions listed in camera3Resolution are supported, up to the
+ * camera maximum resolution.
*
* Augment the list by adding resolutions calculated from the camera
* maximum one.
@@ -687,6 +700,14 @@ int CameraCapabilities::initializeStreamConfigurations()
minFrameDuration = minFrameDurationCap;
}
+ /*
+ * Calculate FPS as CTS does and adjust the minimum
+ * frame duration accordingly: see
+ * Camera2SurfaceViewTestCase.java:getSuitableFpsRangeForDuration()
+ */
+ minFrameDuration =
+ 1e9 / static_cast<unsigned int>(floor(1e9 / minFrameDuration + 0.05f));
+
streamConfigurations_.push_back({
res, androidFormat, minFrameDuration, maxFrameDuration,
});
@@ -1042,18 +1063,18 @@ int CameraCapabilities::initializeStaticMetadata()
/* Sensor static metadata. */
std::array<int32_t, 2> pixelArraySize;
{
- const Size &size = properties.get(properties::PixelArraySize);
+ const Size &size = properties.get(properties::PixelArraySize).value_or(Size{});
pixelArraySize[0] = size.width;
pixelArraySize[1] = size.height;
staticMetadata_->addEntry(ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE,
pixelArraySize);
}
- if (properties.contains(properties::UnitCellSize)) {
- const Size &cellSize = properties.get<Size>(properties::UnitCellSize);
+ const auto &cellSize = properties.get<Size>(properties::UnitCellSize);
+ if (cellSize) {
std::array<float, 2> physicalSize{
- cellSize.width * pixelArraySize[0] / 1e6f,
- cellSize.height * pixelArraySize[1] / 1e6f
+ cellSize->width * pixelArraySize[0] / 1e6f,
+ cellSize->height * pixelArraySize[1] / 1e6f
};
staticMetadata_->addEntry(ANDROID_SENSOR_INFO_PHYSICAL_SIZE,
physicalSize);
@@ -1061,7 +1082,7 @@ int CameraCapabilities::initializeStaticMetadata()
{
const Span<const Rectangle> &rects =
- properties.get(properties::PixelArrayActiveAreas);
+ properties.get(properties::PixelArrayActiveAreas).value_or(Span<const Rectangle>{});
std::vector<int32_t> data{
static_cast<int32_t>(rects[0].x),
static_cast<int32_t>(rects[0].y),
@@ -1079,11 +1100,10 @@ int CameraCapabilities::initializeStaticMetadata()
sensitivityRange);
/* Report the color filter arrangement if the camera reports it. */
- if (properties.contains(properties::draft::ColorFilterArrangement)) {
- uint8_t filterArr = properties.get(properties::draft::ColorFilterArrangement);
+ const auto &filterArr = properties.get(properties::draft::ColorFilterArrangement);
+ if (filterArr)
staticMetadata_->addEntry(ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT,
- filterArr);
- }
+ *filterArr);
const auto &exposureInfo = controlsInfo.find(&controls::ExposureTime);
if (exposureInfo != controlsInfo.end()) {
@@ -1287,12 +1307,10 @@ int CameraCapabilities::initializeStaticMetadata()
* recording profile. Inspecting the Intel IPU3 HAL
* implementation confirms this but no reference has been found
* in the metadata documentation.
- *
- * Calculate FPS as CTS does: see
- * Camera2SurfaceViewTestCase.java:getSuitableFpsRangeForDuration()
*/
- unsigned int fps = static_cast<unsigned int>
- (floor(1e9 / entry.minFrameDurationNsec + 0.05f));
+ unsigned int fps =
+ static_cast<unsigned int>(floor(1e9 / entry.minFrameDurationNsec));
+
if (entry.androidFormat != HAL_PIXEL_FORMAT_BLOB && fps < 30)
continue;
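
The two camera_capabilities.cpp hunks above move the CTS-style rounding from the reported FPS down to the minimum frame duration itself. A minimal, standalone sketch of that arithmetic, assuming an illustrative ~30 fps mode (the 33333333 ns value and the program around it are not part of the patch):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    int main()
    {
        /* Illustrative sensor mode: ~30 fps, i.e. a 33333333 ns minimum frame duration. */
        int64_t minFrameDuration = 33333333;

        /* Round FPS the way the Android CTS does, then derive the duration from it. */
        unsigned int fps = static_cast<unsigned int>(std::floor(1e9 / minFrameDuration + 0.05f));
        minFrameDuration = static_cast<int64_t>(1e9 / fps);

        /* Prints "fps=30 minFrameDuration=33333333 ns". */
        std::printf("fps=%u minFrameDuration=%lld ns\n", fps,
                    static_cast<long long>(minFrameDuration));
        return 0;
    }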
diff --git a/src/android/camera_capabilities.h b/src/android/camera_capabilities.h
index 6f66f221..56ac1efe 100644
--- a/src/android/camera_capabilities.h
+++ b/src/android/camera_capabilities.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * camera_capabilities.h - Camera static properties manager
+ * Camera static properties manager
*/
#pragma once
diff --git a/src/android/camera_device.cpp b/src/android/camera_device.cpp
index 8c039fb9..493f66e7 100644
--- a/src/android/camera_device.cpp
+++ b/src/android/camera_device.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_device.cpp - libcamera Android Camera Device
+ * libcamera Android Camera Device
*/
#include "camera_device.h"
@@ -30,6 +30,7 @@
#include "camera_hal_config.h"
#include "camera_ops.h"
#include "camera_request.h"
+#include "hal_framebuffer.h"
using namespace libcamera;
@@ -305,9 +306,9 @@ int CameraDevice::initialize(const CameraConfigData *cameraConfigData)
*/
const ControlList &properties = camera_->properties();
- if (properties.contains(properties::Location)) {
- int32_t location = properties.get(properties::Location);
- switch (location) {
+ const auto &location = properties.get(properties::Location);
+ if (location) {
+ switch (*location) {
case properties::CameraLocationFront:
facing_ = CAMERA_FACING_FRONT;
break;
@@ -355,9 +356,9 @@ int CameraDevice::initialize(const CameraConfigData *cameraConfigData)
* value for clockwise direction as required by the Android orientation
* metadata.
*/
- if (properties.contains(properties::Rotation)) {
- int rotation = properties.get(properties::Rotation);
- orientation_ = (360 - rotation) % 360;
+ const auto &rotation = properties.get(properties::Rotation);
+ if (rotation) {
+ orientation_ = (360 - *rotation) % 360;
if (cameraConfigData && cameraConfigData->rotation != -1 &&
orientation_ != cameraConfigData->rotation) {
LOG(HAL, Warning)
@@ -432,8 +433,6 @@ void CameraDevice::flush()
void CameraDevice::stop()
{
MutexLocker stateLock(stateMutex_);
- if (state_ == State::Stopped)
- return;
camera_->stop();
@@ -771,7 +770,7 @@ int CameraDevice::configureStreams(camera3_stream_configuration_t *stream_list)
return 0;
}
-std::unique_ptr<FrameBuffer>
+std::unique_ptr<HALFrameBuffer>
CameraDevice::createFrameBuffer(const buffer_handle_t camera3buffer,
PixelFormat pixelFormat, const Size &size)
{
@@ -794,7 +793,7 @@ CameraDevice::createFrameBuffer(const buffer_handle_t camera3buffer,
planes[i].length = buf.size(i);
}
- return std::make_unique<FrameBuffer>(planes);
+ return std::make_unique<HALFrameBuffer>(planes, camera3buffer);
}
int CameraDevice::processControls(Camera3RequestDescriptor *descriptor)
@@ -951,8 +950,8 @@ int CameraDevice::processCaptureRequest(camera3_capture_request_t *camera3Reques
*/
if (camera3Request->settings)
lastSettings_ = camera3Request->settings;
- else
- descriptor->settings_ = lastSettings_;
+
+ descriptor->settings_ = lastSettings_;
LOG(HAL, Debug) << "Queueing request " << descriptor->request_->cookie()
<< " with " << descriptor->buffers_.size() << " streams";
@@ -1076,7 +1075,7 @@ int CameraDevice::processCaptureRequest(camera3_capture_request_t *camera3Reques
descriptor->request_->addBuffer(sourceStream->stream(),
frameBuffer, nullptr);
- requestedStreams.erase(sourceStream);
+ requestedStreams.insert(sourceStream);
}
/*
@@ -1107,6 +1106,8 @@ int CameraDevice::processCaptureRequest(camera3_capture_request_t *camera3Reques
}
if (state_ == State::Stopped) {
+ lastSettings_ = {};
+
ret = camera_->start();
if (ret) {
LOG(HAL, Error) << "Failed to start camera";
@@ -1181,7 +1182,8 @@ void CameraDevice::requestComplete(Request *request)
* as soon as possible, earlier than request completion time.
*/
uint64_t sensorTimestamp = static_cast<uint64_t>(request->metadata()
- .get(controls::SensorTimestamp));
+ .get(controls::SensorTimestamp)
+ .value_or(0));
notifyShutter(descriptor->frameNumber_, sensorTimestamp);
LOG(HAL, Debug) << "Request " << request->cookie() << " completed with "
@@ -1560,29 +1562,27 @@ CameraDevice::getResultMetadata(const Camera3RequestDescriptor &descriptor) cons
rolling_shutter_skew);
/* Add metadata tags reported by libcamera. */
- const int64_t timestamp = metadata.get(controls::SensorTimestamp);
+ const int64_t timestamp = metadata.get(controls::SensorTimestamp).value_or(0);
resultMetadata->addEntry(ANDROID_SENSOR_TIMESTAMP, timestamp);
- if (metadata.contains(controls::draft::PipelineDepth)) {
- uint8_t pipeline_depth =
- metadata.get<int32_t>(controls::draft::PipelineDepth);
+ const auto &pipelineDepth = metadata.get(controls::draft::PipelineDepth);
+ if (pipelineDepth)
resultMetadata->addEntry(ANDROID_REQUEST_PIPELINE_DEPTH,
- pipeline_depth);
- }
+ *pipelineDepth);
- if (metadata.contains(controls::ExposureTime)) {
- int64_t exposure = metadata.get(controls::ExposureTime) * 1000ULL;
- resultMetadata->addEntry(ANDROID_SENSOR_EXPOSURE_TIME, exposure);
- }
+ const auto &exposureTime = metadata.get(controls::ExposureTime);
+ if (exposureTime)
+ resultMetadata->addEntry(ANDROID_SENSOR_EXPOSURE_TIME,
+ *exposureTime * 1000ULL);
- if (metadata.contains(controls::FrameDuration)) {
- int64_t duration = metadata.get(controls::FrameDuration) * 1000;
+ const auto &frameDuration = metadata.get(controls::FrameDuration);
+ if (frameDuration)
resultMetadata->addEntry(ANDROID_SENSOR_FRAME_DURATION,
- duration);
- }
+ *frameDuration * 1000);
- if (metadata.contains(controls::ScalerCrop)) {
- Rectangle crop = metadata.get(controls::ScalerCrop);
+ const auto &scalerCrop = metadata.get(controls::ScalerCrop);
+ if (scalerCrop) {
+ const Rectangle &crop = *scalerCrop;
int32_t cropRect[] = {
crop.x, crop.y, static_cast<int32_t>(crop.width),
static_cast<int32_t>(crop.height),
@@ -1590,12 +1590,10 @@ CameraDevice::getResultMetadata(const Camera3RequestDescriptor &descriptor) cons
resultMetadata->addEntry(ANDROID_SCALER_CROP_REGION, cropRect);
}
- if (metadata.contains(controls::draft::TestPatternMode)) {
- const int32_t testPatternMode =
- metadata.get(controls::draft::TestPatternMode);
+ const auto &testPatternMode = metadata.get(controls::draft::TestPatternMode);
+ if (testPatternMode)
resultMetadata->addEntry(ANDROID_SENSOR_TEST_PATTERN_MODE,
- testPatternMode);
- }
+ *testPatternMode);
/*
* Return the result metadata pack even is not valid: get() will return
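
Most of the camera_device.cpp hunks above follow one pattern: ControlList::get() now returns a std::optional, so the old contains()-then-get() pairs collapse into a single get() followed by value_or() or a bool test. A small sketch of both forms against the libcamera headers only (the helper names are illustrative):

    #include <cstdint>

    #include <libcamera/control_ids.h>
    #include <libcamera/controls.h>

    /* value_or() supplies the default that contains() used to guard. */
    static int64_t sensorTimestamp(const libcamera::ControlList &metadata)
    {
        return metadata.get(libcamera::controls::SensorTimestamp).value_or(0);
    }

    /* Test the optional when the value is only consumed if it was reported. */
    static int64_t exposureTimeNs(const libcamera::ControlList &metadata)
    {
        const auto &exposure = metadata.get(libcamera::controls::ExposureTime);
        return exposure ? *exposure * 1000LL : -1;
    }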
diff --git a/src/android/camera_device.h b/src/android/camera_device.h
index 64050416..194ca303 100644
--- a/src/android/camera_device.h
+++ b/src/android/camera_device.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_device.h - libcamera Android Camera Device
+ * libcamera Android Camera Device
*/
#pragma once
@@ -29,6 +29,7 @@
#include "camera_capabilities.h"
#include "camera_metadata.h"
#include "camera_stream.h"
+#include "hal_framebuffer.h"
#include "jpeg/encoder.h"
class Camera3RequestDescriptor;
@@ -83,7 +84,7 @@ private:
void stop() LIBCAMERA_TSA_EXCLUDES(stateMutex_);
- std::unique_ptr<libcamera::FrameBuffer>
+ std::unique_ptr<HALFrameBuffer>
createFrameBuffer(const buffer_handle_t camera3buffer,
libcamera::PixelFormat pixelFormat,
const libcamera::Size &size);
diff --git a/src/android/camera_hal_config.cpp b/src/android/camera_hal_config.cpp
index bacfe4b9..7ef451ef 100644
--- a/src/android/camera_hal_config.cpp
+++ b/src/android/camera_hal_config.cpp
@@ -2,11 +2,10 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * camera_hal_config.cpp - Camera HAL configuration file manager
+ * Camera HAL configuration file manager
*/
#include "camera_hal_config.h"
-#include <filesystem>
#include <stdlib.h>
#include <string>
@@ -160,15 +159,15 @@ CameraHalConfig::CameraHalConfig()
*/
int CameraHalConfig::parseConfigurationFile()
{
- std::filesystem::path filePath = LIBCAMERA_SYSCONF_DIR;
- filePath /= "camera_hal.yaml";
- if (!std::filesystem::is_regular_file(filePath)) {
+ std::string filePath = LIBCAMERA_SYSCONF_DIR "/camera_hal.yaml";
+
+ File file(filePath);
+ if (!file.exists()) {
LOG(HALConfig, Debug)
<< "Configuration file: \"" << filePath << "\" not found";
return -ENOENT;
}
- File file(filePath);
if (!file.open(File::OpenModeFlag::ReadOnly)) {
int ret = file.error();
LOG(HALConfig, Error) << "Failed to open configuration file "
diff --git a/src/android/camera_hal_config.h b/src/android/camera_hal_config.h
index 9df554f9..a4bedb6e 100644
--- a/src/android/camera_hal_config.h
+++ b/src/android/camera_hal_config.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * camera_hal_config.h - Camera HAL configuration file manager
+ * Camera HAL configuration file manager
*/
#pragma once
diff --git a/src/android/camera_hal_manager.cpp b/src/android/camera_hal_manager.cpp
index 5f7bfe26..7500c749 100644
--- a/src/android/camera_hal_manager.cpp
+++ b/src/android/camera_hal_manager.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_hal_manager.cpp - libcamera Android Camera Manager
+ * libcamera Android Camera Manager
*/
#include "camera_hal_manager.h"
@@ -140,7 +140,8 @@ void CameraHalManager::cameraAdded(std::shared_ptr<Camera> cam)
*/
if (!isCameraExternal && !halConfig_.exists()) {
LOG(HAL, Error)
- << "HAL configuration file is mandatory for internal cameras";
+ << "HAL configuration file is mandatory for internal cameras."
+ << " Camera " << cam->id() << " failed to load";
return;
}
@@ -228,11 +229,7 @@ void CameraHalManager::cameraRemoved(std::shared_ptr<Camera> cam)
int32_t CameraHalManager::cameraLocation(const Camera *cam)
{
- const ControlList &properties = cam->properties();
- if (!properties.contains(properties::Location))
- return -1;
-
- return properties.get(properties::Location);
+ return cam->properties().get(properties::Location).value_or(-1);
}
CameraDevice *CameraHalManager::cameraDeviceFromHalId(unsigned int id)
diff --git a/src/android/camera_hal_manager.h b/src/android/camera_hal_manager.h
index a5f8b933..836a8daf 100644
--- a/src/android/camera_hal_manager.h
+++ b/src/android/camera_hal_manager.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_hal_manager.h - libcamera Android Camera Manager
+ * libcamera Android Camera Manager
*/
#pragma once
diff --git a/src/android/camera_metadata.cpp b/src/android/camera_metadata.cpp
index b3e515d2..99f033f9 100644
--- a/src/android/camera_metadata.cpp
+++ b/src/android/camera_metadata.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_metadata.cpp - libcamera Android Camera Metadata Helper
+ * libcamera Android Camera Metadata Helper
*/
#include "camera_metadata.h"
diff --git a/src/android/camera_metadata.h b/src/android/camera_metadata.h
index 0c31ec6b..474f280c 100644
--- a/src/android/camera_metadata.h
+++ b/src/android/camera_metadata.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_metadata.h - libcamera Android Camera Metadata Helper
+ * libcamera Android Camera Metadata Helper
*/
#pragma once
diff --git a/src/android/camera_ops.cpp b/src/android/camera_ops.cpp
index 8a3cfa17..ecaac5a3 100644
--- a/src/android/camera_ops.cpp
+++ b/src/android/camera_ops.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_ops.h - Android Camera HAL Operations
+ * Android Camera HAL Operations
*/
#include "camera_ops.h"
diff --git a/src/android/camera_ops.h b/src/android/camera_ops.h
index b501bb7e..750dc945 100644
--- a/src/android/camera_ops.h
+++ b/src/android/camera_ops.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_ops.h - Android Camera HAL Operations
+ * Android Camera HAL Operations
*/
#pragma once
diff --git a/src/android/camera_request.cpp b/src/android/camera_request.cpp
index 6c87adba..0d45960d 100644
--- a/src/android/camera_request.cpp
+++ b/src/android/camera_request.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019-2021, Google Inc.
*
- * camera_request.cpp - libcamera Android Camera Request Descriptor
+ * libcamera Android Camera Request Descriptor
*/
#include "camera_request.h"
diff --git a/src/android/camera_request.h b/src/android/camera_request.h
index 37b6ae32..5b479180 100644
--- a/src/android/camera_request.h
+++ b/src/android/camera_request.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019-2021, Google Inc.
*
- * camera_request.h - libcamera Android Camera Request Descriptor
+ * libcamera Android Camera Request Descriptor
*/
#pragma once
@@ -21,6 +21,7 @@
#include <hardware/camera3.h>
#include "camera_metadata.h"
+#include "hal_framebuffer.h"
class CameraBuffer;
class CameraStream;
@@ -44,7 +45,7 @@ public:
CameraStream *stream;
buffer_handle_t *camera3Buffer;
- std::unique_ptr<libcamera::FrameBuffer> frameBuffer;
+ std::unique_ptr<HALFrameBuffer> frameBuffer;
libcamera::UniqueFD fence;
Status status = Status::Success;
libcamera::FrameBuffer *internalBuffer = nullptr;
diff --git a/src/android/camera_stream.cpp b/src/android/camera_stream.cpp
index 045e6006..1d68540d 100644
--- a/src/android/camera_stream.cpp
+++ b/src/android/camera_stream.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * camera_stream.cpp - Camera HAL stream
+ * Camera HAL stream
*/
#include "camera_stream.h"
diff --git a/src/android/camera_stream.h b/src/android/camera_stream.h
index 4c5078b2..395552da 100644
--- a/src/android/camera_stream.h
+++ b/src/android/camera_stream.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * camera_stream.h - Camera HAL stream
+ * Camera HAL stream
*/
#pragma once
diff --git a/src/android/cros/camera3_hal.cpp b/src/android/cros/camera3_hal.cpp
index fb863b5f..6010a5ad 100644
--- a/src/android/cros/camera3_hal.cpp
+++ b/src/android/cros/camera3_hal.cpp
@@ -2,15 +2,17 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * camera3_hal.cpp - cros-specific components of Android Camera HALv3 module
+ * cros-specific components of Android Camera HALv3 module
*/
#include <cros-camera/cros_camera_hal.h>
#include "../camera_hal_manager.h"
+#include "../cros_mojo_token.h"
-static void set_up([[maybe_unused]] cros::CameraMojoChannelManagerToken *token)
+static void set_up(cros::CameraMojoChannelManagerToken *token)
{
+ gCrosMojoToken = token;
}
static void tear_down()
diff --git a/src/android/cros_mojo_token.h b/src/android/cros_mojo_token.h
new file mode 100644
index 00000000..d0baa80f
--- /dev/null
+++ b/src/android/cros_mojo_token.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Google Inc.
+ *
+ * cros-specific mojo token
+ */
+
+#pragma once
+
+#include <cros-camera/cros_camera_hal.h>
+
+inline cros::CameraMojoChannelManagerToken *gCrosMojoToken = nullptr;
diff --git a/src/android/data/nautilus/camera_hal.yaml b/src/android/data/nautilus/camera_hal.yaml
index faddd29e..2105fcca 100644
--- a/src/android/data/nautilus/camera_hal.yaml
+++ b/src/android/data/nautilus/camera_hal.yaml
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: CC0-1.0
+
cameras:
"\\_SB_.PCI0.I2C2.CAM0":
location: back
diff --git a/src/android/data/soraka/camera_hal.yaml b/src/android/data/soraka/camera_hal.yaml
index 2e996403..d886af06 100644
--- a/src/android/data/soraka/camera_hal.yaml
+++ b/src/android/data/soraka/camera_hal.yaml
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: CC0-1.0
+
cameras:
"\\_SB_.PCI0.I2C4.CAM1":
location: front
diff --git a/src/android/frame_buffer_allocator.h b/src/android/frame_buffer_allocator.h
index 5d2eeda1..3e68641c 100644
--- a/src/android/frame_buffer_allocator.h
+++ b/src/android/frame_buffer_allocator.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * frame_buffer_allocator.h - Interface definition to allocate Frame buffer in
+ * Interface definition to allocate Frame buffer in
* platform dependent way.
*/
#ifndef __ANDROID_FRAME_BUFFER_ALLOCATOR_H__
@@ -13,9 +13,10 @@
#include <libcamera/base/class.h>
#include <libcamera/camera.h>
-#include <libcamera/framebuffer.h>
#include <libcamera/geometry.h>
+#include "hal_framebuffer.h"
+
class CameraDevice;
class PlatformFrameBufferAllocator : libcamera::Extensible
@@ -31,7 +32,7 @@ public:
* Note: The returned FrameBuffer needs to be destroyed before
* PlatformFrameBufferAllocator is destroyed.
*/
- std::unique_ptr<libcamera::FrameBuffer> allocate(
+ std::unique_ptr<HALFrameBuffer> allocate(
int halPixelFormat, const libcamera::Size &size, uint32_t usage);
};
@@ -44,7 +45,7 @@ PlatformFrameBufferAllocator::PlatformFrameBufferAllocator( \
PlatformFrameBufferAllocator::~PlatformFrameBufferAllocator() \
{ \
} \
-std::unique_ptr<libcamera::FrameBuffer> \
+std::unique_ptr<HALFrameBuffer> \
PlatformFrameBufferAllocator::allocate(int halPixelFormat, \
const libcamera::Size &size, \
uint32_t usage) \
diff --git a/src/android/hal_framebuffer.cpp b/src/android/hal_framebuffer.cpp
new file mode 100644
index 00000000..d4899f45
--- /dev/null
+++ b/src/android/hal_framebuffer.cpp
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Google Inc.
+ *
+ * HAL Frame Buffer Handling
+ */
+
+#include "hal_framebuffer.h"
+
+#include <hardware/camera3.h>
+
+HALFrameBuffer::HALFrameBuffer(std::unique_ptr<Private> d,
+ buffer_handle_t handle)
+ : FrameBuffer(std::move(d)), handle_(handle)
+{
+}
+
+HALFrameBuffer::HALFrameBuffer(const std::vector<Plane> &planes,
+ buffer_handle_t handle)
+ : FrameBuffer(planes), handle_(handle)
+{
+}
diff --git a/src/android/hal_framebuffer.h b/src/android/hal_framebuffer.h
new file mode 100644
index 00000000..cea49e2d
--- /dev/null
+++ b/src/android/hal_framebuffer.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Google Inc.
+ *
+ * HAL Frame Buffer Handling
+ */
+
+#pragma once
+
+#include "libcamera/internal/framebuffer.h"
+
+#include <hardware/camera3.h>
+
+class HALFrameBuffer final : public libcamera::FrameBuffer
+{
+public:
+ HALFrameBuffer(std::unique_ptr<Private> d,
+ buffer_handle_t handle);
+ HALFrameBuffer(const std::vector<Plane> &planes,
+ buffer_handle_t handle);
+
+ buffer_handle_t handle() const { return handle_; }
+
+private:
+ buffer_handle_t handle_;
+};
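
The new HALFrameBuffer only adds the originating buffer_handle_t to a libcamera FrameBuffer, so code further down (the platform allocators and the JEA encoder) can recover the gralloc handle from a plain FrameBuffer pointer. A short sketch of the intended construction, with a hypothetical caller and placeholder planes/handle:

    #include <memory>
    #include <vector>

    #include "hal_framebuffer.h"

    std::unique_ptr<HALFrameBuffer>
    wrapBuffer(const std::vector<libcamera::FrameBuffer::Plane> &planes,
               buffer_handle_t handle)
    {
        /* The handle travels with the buffer and is read back via handle(). */
        return std::make_unique<HALFrameBuffer>(planes, handle);
    }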
diff --git a/src/android/jpeg/encoder.h b/src/android/jpeg/encoder.h
index b974d367..ed033c19 100644
--- a/src/android/jpeg/encoder.h
+++ b/src/android/jpeg/encoder.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * encoder.h - Image encoding interface
+ * Image encoding interface
*/
#pragma once
@@ -12,14 +12,15 @@
#include <libcamera/framebuffer.h>
#include <libcamera/stream.h>
+#include "../camera_request.h"
+
class Encoder
{
public:
virtual ~Encoder() = default;
virtual int configure(const libcamera::StreamConfiguration &cfg) = 0;
- virtual int encode(const libcamera::FrameBuffer &source,
- libcamera::Span<uint8_t> destination,
+ virtual int encode(Camera3RequestDescriptor::StreamBuffer *buffer,
libcamera::Span<const uint8_t> exifData,
unsigned int quality) = 0;
};
diff --git a/src/android/jpeg/encoder_jea.cpp b/src/android/jpeg/encoder_jea.cpp
new file mode 100644
index 00000000..25dc4317
--- /dev/null
+++ b/src/android/jpeg/encoder_jea.cpp
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Google Inc.
+ *
+ * JPEG encoding using CrOS JEA
+ */
+
+#include "encoder_jea.h"
+
+#include "libcamera/internal/mapped_framebuffer.h"
+
+#include <cros-camera/camera_mojo_channel_manager_token.h>
+
+#include "../cros_mojo_token.h"
+#include "../hal_framebuffer.h"
+
+EncoderJea::EncoderJea() = default;
+
+EncoderJea::~EncoderJea() = default;
+
+int EncoderJea::configure(const libcamera::StreamConfiguration &cfg)
+{
+ size_ = cfg.size;
+
+ if (jpegCompressor_)
+ return 0;
+
+ if (gCrosMojoToken == nullptr)
+ return -ENOTSUP;
+
+ jpegCompressor_ = cros::JpegCompressor::GetInstance(gCrosMojoToken);
+
+ return 0;
+}
+
+int EncoderJea::encode(Camera3RequestDescriptor::StreamBuffer *buffer,
+ libcamera::Span<const uint8_t> exifData,
+ unsigned int quality)
+{
+ if (!jpegCompressor_)
+ return -ENOTSUP;
+
+ uint32_t outDataSize = 0;
+ const HALFrameBuffer *fb =
+ dynamic_cast<const HALFrameBuffer *>(buffer->srcBuffer);
+
+ if (!jpegCompressor_->CompressImageFromHandle(fb->handle(),
+ *buffer->camera3Buffer,
+ size_.width, size_.height,
+ quality, exifData.data(),
+ exifData.size(),
+ &outDataSize))
+ return -EBUSY;
+
+ return outDataSize;
+}
diff --git a/src/android/jpeg/encoder_jea.h b/src/android/jpeg/encoder_jea.h
new file mode 100644
index 00000000..91115d2e
--- /dev/null
+++ b/src/android/jpeg/encoder_jea.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Google Inc.
+ *
+ * JPEG encoding using CrOS JEA
+ */
+
+#pragma once
+
+#include <libcamera/geometry.h>
+
+#include <cros-camera/jpeg_compressor.h>
+
+#include "encoder.h"
+
+class EncoderJea : public Encoder
+{
+public:
+ EncoderJea();
+ ~EncoderJea();
+
+ int configure(const libcamera::StreamConfiguration &cfg) override;
+ int encode(Camera3RequestDescriptor::StreamBuffer *buffer,
+ libcamera::Span<const uint8_t> exifData,
+ unsigned int quality) override;
+
+private:
+ libcamera::Size size_;
+
+ std::unique_ptr<cros::JpegCompressor> jpegCompressor_;
+};
diff --git a/src/android/jpeg/encoder_libjpeg.cpp b/src/android/jpeg/encoder_libjpeg.cpp
index fd62bd9c..7fc6287e 100644
--- a/src/android/jpeg/encoder_libjpeg.cpp
+++ b/src/android/jpeg/encoder_libjpeg.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * encoder_libjpeg.cpp - JPEG encoding using libjpeg native API
+ * JPEG encoding using libjpeg native API
*/
#include "encoder_libjpeg.h"
@@ -24,6 +24,8 @@
#include "libcamera/internal/formats.h"
#include "libcamera/internal/mapped_framebuffer.h"
+#include "../camera_buffer.h"
+
using namespace libcamera;
LOG_DECLARE_CATEGORY(JPEG)
@@ -178,17 +180,20 @@ void EncoderLibJpeg::compressNV(const std::vector<Span<uint8_t>> &planes)
}
}
-int EncoderLibJpeg::encode(const FrameBuffer &source, Span<uint8_t> dest,
- Span<const uint8_t> exifData, unsigned int quality)
+int EncoderLibJpeg::encode(Camera3RequestDescriptor::StreamBuffer *buffer,
+ libcamera::Span<const uint8_t> exifData,
+ unsigned int quality)
{
- MappedFrameBuffer frame(&source, MappedFrameBuffer::MapFlag::Read);
+ MappedFrameBuffer frame(buffer->srcBuffer,
+ MappedFrameBuffer::MapFlag::Read);
if (!frame.isValid()) {
LOG(JPEG, Error) << "Failed to map FrameBuffer : "
<< strerror(frame.error());
return frame.error();
}
- return encode(frame.planes(), dest, exifData, quality);
+ return encode(frame.planes(), buffer->dstBuffer->plane(0),
+ exifData, quality);
}
int EncoderLibJpeg::encode(const std::vector<Span<uint8_t>> &src,
diff --git a/src/android/jpeg/encoder_libjpeg.h b/src/android/jpeg/encoder_libjpeg.h
index 1b3ac067..4ac85c22 100644
--- a/src/android/jpeg/encoder_libjpeg.h
+++ b/src/android/jpeg/encoder_libjpeg.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * encoder_libjpeg.h - JPEG encoding using libjpeg
+ * JPEG encoding using libjpeg
*/
#pragma once
@@ -22,8 +22,7 @@ public:
~EncoderLibJpeg();
int configure(const libcamera::StreamConfiguration &cfg) override;
- int encode(const libcamera::FrameBuffer &source,
- libcamera::Span<uint8_t> destination,
+ int encode(Camera3RequestDescriptor::StreamBuffer *buffer,
libcamera::Span<const uint8_t> exifData,
unsigned int quality) override;
int encode(const std::vector<libcamera::Span<uint8_t>> &planes,
diff --git a/src/android/jpeg/exif.cpp b/src/android/jpeg/exif.cpp
index 3220b458..b8c871df 100644
--- a/src/android/jpeg/exif.cpp
+++ b/src/android/jpeg/exif.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * exif.cpp - EXIF tag creation using libexif
+ * EXIF tag creation using libexif
*/
#include "exif.h"
@@ -430,16 +430,13 @@ void Exif::setOrientation(int orientation)
setShort(EXIF_IFD_0, EXIF_TAG_ORIENTATION, value);
}
-/*
- * The thumbnail data should remain valid until the Exif object is destroyed.
- * Failing to do so, might result in no thumbnail data being set even after a
- * call to Exif::setThumbnail().
- */
-void Exif::setThumbnail(Span<const unsigned char> thumbnail,
+void Exif::setThumbnail(std::vector<unsigned char> &&thumbnail,
Compression compression)
{
- data_->data = const_cast<unsigned char *>(thumbnail.data());
- data_->size = thumbnail.size();
+ thumbnailData_ = std::move(thumbnail);
+
+ data_->data = thumbnailData_.data();
+ data_->size = thumbnailData_.size();
setShort(EXIF_IFD_0, EXIF_TAG_COMPRESSION, compression);
}
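
Exif::setThumbnail() now owns the thumbnail bytes instead of borrowing them, which removes the old requirement that the caller keep the data alive until the Exif object is destroyed. A minimal usage sketch (attachThumbnail and the producer of the bytes are hypothetical):

    #include <utility>
    #include <vector>

    #include "exif.h"

    void attachThumbnail(Exif &exif, std::vector<unsigned char> thumbnail)
    {
        /* The vector is moved in; the caller must not reuse it afterwards. */
        exif.setThumbnail(std::move(thumbnail), Exif::Compression::JPEG);
    }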
diff --git a/src/android/jpeg/exif.h b/src/android/jpeg/exif.h
index 2ff8fb78..446d53f3 100644
--- a/src/android/jpeg/exif.h
+++ b/src/android/jpeg/exif.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * exif.h - EXIF tag creator using libexif
+ * EXIF tag creator using libexif
*/
#pragma once
@@ -10,6 +10,7 @@
#include <chrono>
#include <string>
#include <time.h>
+#include <vector>
#include <libexif/exif-data.h>
@@ -60,7 +61,7 @@ public:
void setOrientation(int orientation);
void setSize(const libcamera::Size &size);
- void setThumbnail(libcamera::Span<const unsigned char> thumbnail,
+ void setThumbnail(std::vector<unsigned char> &&thumbnail,
Compression compression);
void setTimestamp(time_t timestamp, std::chrono::milliseconds msec);
@@ -106,4 +107,6 @@ private:
unsigned char *exifData_;
unsigned int size_;
+
+ std::vector<unsigned char> thumbnailData_;
};
diff --git a/src/android/jpeg/meson.build b/src/android/jpeg/meson.build
new file mode 100644
index 00000000..3402e614
--- /dev/null
+++ b/src/android/jpeg/meson.build
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: CC0-1.0
+
+android_hal_sources += files([
+ 'encoder_libjpeg.cpp',
+ 'exif.cpp',
+ 'post_processor_jpeg.cpp',
+ 'thumbnailer.cpp'
+])
+
+platform = get_option('android_platform')
+if platform == 'cros'
+ android_hal_sources += files(['encoder_jea.cpp'])
+ android_deps += [dependency('libcros_camera')]
+endif
diff --git a/src/android/jpeg/post_processor_jpeg.cpp b/src/android/jpeg/post_processor_jpeg.cpp
index d72ebc3c..89b8a401 100644
--- a/src/android/jpeg/post_processor_jpeg.cpp
+++ b/src/android/jpeg/post_processor_jpeg.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * post_processor_jpeg.cpp - JPEG Post Processor
+ * JPEG Post Processor
*/
#include "post_processor_jpeg.h"
@@ -12,7 +12,11 @@
#include "../camera_device.h"
#include "../camera_metadata.h"
#include "../camera_request.h"
+#if defined(OS_CHROMEOS)
+#include "encoder_jea.h"
+#else /* !defined(OS_CHROMEOS) */
#include "encoder_libjpeg.h"
+#endif
#include "exif.h"
#include <libcamera/base/log.h>
@@ -46,7 +50,11 @@ int PostProcessorJpeg::configure(const StreamConfiguration &inCfg,
thumbnailer_.configure(inCfg.size, inCfg.pixelFormat);
+#if defined(OS_CHROMEOS)
+ encoder_ = std::make_unique<EncoderJea>();
+#else /* !defined(OS_CHROMEOS) */
encoder_ = std::make_unique<EncoderLibJpeg>();
+#endif
return encoder_->configure(inCfg);
}
@@ -166,7 +174,7 @@ void PostProcessorJpeg::process(Camera3RequestDescriptor::StreamBuffer *streamBu
std::vector<unsigned char> thumbnail;
generateThumbnail(source, thumbnailSize, quality, &thumbnail);
if (!thumbnail.empty())
- exif.setThumbnail(thumbnail, Exif::Compression::JPEG);
+ exif.setThumbnail(std::move(thumbnail), Exif::Compression::JPEG);
}
resultMetadata->addEntry(ANDROID_JPEG_THUMBNAIL_SIZE, data, 2);
@@ -194,8 +202,7 @@ void PostProcessorJpeg::process(Camera3RequestDescriptor::StreamBuffer *streamBu
const uint8_t quality = ret ? *entry.data.u8 : 95;
resultMetadata->addEntry(ANDROID_JPEG_QUALITY, quality);
- int jpeg_size = encoder_->encode(source, destination->plane(0),
- exif.data(), quality);
+ int jpeg_size = encoder_->encode(streamBuffer, exif.data(), quality);
if (jpeg_size < 0) {
LOG(JPEG, Error) << "Failed to encode stream image";
processComplete.emit(streamBuffer, PostProcessor::Status::Error);
diff --git a/src/android/jpeg/post_processor_jpeg.h b/src/android/jpeg/post_processor_jpeg.h
index 98309b01..6fe21457 100644
--- a/src/android/jpeg/post_processor_jpeg.h
+++ b/src/android/jpeg/post_processor_jpeg.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * post_processor_jpeg.h - JPEG Post Processor
+ * JPEG Post Processor
*/
#pragma once
diff --git a/src/android/jpeg/thumbnailer.cpp b/src/android/jpeg/thumbnailer.cpp
index 41c71c76..adafc468 100644
--- a/src/android/jpeg/thumbnailer.cpp
+++ b/src/android/jpeg/thumbnailer.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * thumbnailer.cpp - Simple image thumbnailer
+ * Simple image thumbnailer
*/
#include "thumbnailer.h"
diff --git a/src/android/jpeg/thumbnailer.h b/src/android/jpeg/thumbnailer.h
index d933cf0e..1b836e59 100644
--- a/src/android/jpeg/thumbnailer.h
+++ b/src/android/jpeg/thumbnailer.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * thumbnailer.h - Simple image thumbnailer
+ * Simple image thumbnailer
*/
#pragma once
diff --git a/src/android/meson.build b/src/android/meson.build
index 1bba54de..68646120 100644
--- a/src/android/meson.build
+++ b/src/android/meson.build
@@ -46,16 +46,14 @@ android_hal_sources = files([
'camera_ops.cpp',
'camera_request.cpp',
'camera_stream.cpp',
- 'jpeg/encoder_libjpeg.cpp',
- 'jpeg/exif.cpp',
- 'jpeg/post_processor_jpeg.cpp',
- 'jpeg/thumbnailer.cpp',
+ 'hal_framebuffer.cpp',
'yuv/post_processor_yuv.cpp'
])
android_cpp_args = []
subdir('cros')
+subdir('jpeg')
subdir('mm')
android_camera_metadata_sources = files([
diff --git a/src/android/mm/cros_camera_buffer.cpp b/src/android/mm/cros_camera_buffer.cpp
index 2ac3dc4a..e2a44a2a 100644
--- a/src/android/mm/cros_camera_buffer.cpp
+++ b/src/android/mm/cros_camera_buffer.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * cros_camera_buffer.cpp - Chromium OS buffer backend using CameraBufferManager
+ * Chromium OS buffer backend using CameraBufferManager
*/
#include "../camera_buffer.h"
diff --git a/src/android/mm/cros_frame_buffer_allocator.cpp b/src/android/mm/cros_frame_buffer_allocator.cpp
index 52e8c180..264c0d48 100644
--- a/src/android/mm/cros_frame_buffer_allocator.cpp
+++ b/src/android/mm/cros_frame_buffer_allocator.cpp
@@ -2,8 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * cros_frame_buffer.cpp - Allocate FrameBuffer for Chromium OS using
- * CameraBufferManager
+ * Allocate FrameBuffer for Chromium OS using CameraBufferManager
*/
#include <memory>
@@ -16,6 +15,7 @@
#include "../camera_device.h"
#include "../frame_buffer_allocator.h"
+#include "../hal_framebuffer.h"
#include "cros-camera/camera_buffer_manager.h"
using namespace libcamera;
@@ -28,8 +28,9 @@ class CrosFrameBufferData : public FrameBuffer::Private
LIBCAMERA_DECLARE_PUBLIC(FrameBuffer)
public:
- CrosFrameBufferData(cros::ScopedBufferHandle scopedHandle)
- : FrameBuffer::Private(), scopedHandle_(std::move(scopedHandle))
+ CrosFrameBufferData(cros::ScopedBufferHandle scopedHandle,
+ const std::vector<FrameBuffer::Plane> &planes)
+ : FrameBuffer::Private(planes), scopedHandle_(std::move(scopedHandle))
{
}
@@ -47,11 +48,11 @@ public:
{
}
- std::unique_ptr<libcamera::FrameBuffer>
+ std::unique_ptr<HALFrameBuffer>
allocate(int halPixelFormat, const libcamera::Size &size, uint32_t usage);
};
-std::unique_ptr<libcamera::FrameBuffer>
+std::unique_ptr<HALFrameBuffer>
PlatformFrameBufferAllocator::Private::allocate(int halPixelFormat,
const libcamera::Size &size,
uint32_t usage)
@@ -80,9 +81,8 @@ PlatformFrameBufferAllocator::Private::allocate(int halPixelFormat,
plane.length = cros::CameraBufferManager::GetPlaneSize(handle, i);
}
- return std::make_unique<FrameBuffer>(
- std::make_unique<CrosFrameBufferData>(std::move(scopedHandle)),
- planes);
+ return std::make_unique<HALFrameBuffer>(
+ std::make_unique<CrosFrameBufferData>(std::move(scopedHandle), planes), handle);
}
PUBLIC_FRAME_BUFFER_ALLOCATOR_IMPLEMENTATION
diff --git a/src/android/mm/generic_camera_buffer.cpp b/src/android/mm/generic_camera_buffer.cpp
index 1bd7090d..0ffcb445 100644
--- a/src/android/mm/generic_camera_buffer.cpp
+++ b/src/android/mm/generic_camera_buffer.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * generic_camera_buffer.cpp - Generic Android frame buffer backend
+ * Generic Android frame buffer backend
*/
#include "../camera_buffer.h"
diff --git a/src/android/mm/generic_frame_buffer_allocator.cpp b/src/android/mm/generic_frame_buffer_allocator.cpp
index acb2fa2b..79625a9a 100644
--- a/src/android/mm/generic_frame_buffer_allocator.cpp
+++ b/src/android/mm/generic_frame_buffer_allocator.cpp
@@ -2,9 +2,10 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * generic_camera_buffer.cpp - Allocate FrameBuffer using gralloc API
+ * Allocate FrameBuffer using gralloc API
*/
+#include <dlfcn.h>
#include <memory>
#include <vector>
@@ -20,6 +21,7 @@
#include "../camera_device.h"
#include "../frame_buffer_allocator.h"
+#include "../hal_framebuffer.h"
using namespace libcamera;
@@ -32,8 +34,10 @@ class GenericFrameBufferData : public FrameBuffer::Private
public:
GenericFrameBufferData(struct alloc_device_t *allocDevice,
- buffer_handle_t handle)
- : allocDevice_(allocDevice), handle_(handle)
+ buffer_handle_t handle,
+ const std::vector<FrameBuffer::Plane> &planes)
+ : FrameBuffer::Private(planes), allocDevice_(allocDevice),
+ handle_(handle)
{
ASSERT(allocDevice_);
ASSERT(handle_);
@@ -69,20 +73,21 @@ class PlatformFrameBufferAllocator::Private : public Extensible::Private
public:
Private(CameraDevice *const cameraDevice)
: cameraDevice_(cameraDevice),
- hardwareModule_(cameraDevice->camera3Device()->common.module),
+ hardwareModule_(nullptr),
allocDevice_(nullptr)
{
+ hw_get_module(GRALLOC_HARDWARE_MODULE_ID, &hardwareModule_);
ASSERT(hardwareModule_);
}
~Private() override;
- std::unique_ptr<libcamera::FrameBuffer>
+ std::unique_ptr<HALFrameBuffer>
allocate(int halPixelFormat, const libcamera::Size &size, uint32_t usage);
private:
const CameraDevice *const cameraDevice_;
- struct hw_module_t *const hardwareModule_;
+ const struct hw_module_t *hardwareModule_;
struct alloc_device_t *allocDevice_;
};
@@ -90,9 +95,10 @@ PlatformFrameBufferAllocator::Private::~Private()
{
if (allocDevice_)
gralloc_close(allocDevice_);
+ dlclose(hardwareModule_->dso);
}
-std::unique_ptr<libcamera::FrameBuffer>
+std::unique_ptr<HALFrameBuffer>
PlatformFrameBufferAllocator::Private::allocate(int halPixelFormat,
const libcamera::Size &size,
uint32_t usage)
@@ -135,9 +141,10 @@ PlatformFrameBufferAllocator::Private::allocate(int halPixelFormat,
offset += planeSize;
}
- return std::make_unique<FrameBuffer>(
- std::make_unique<GenericFrameBufferData>(allocDevice_, handle),
- planes);
+ return std::make_unique<HALFrameBuffer>(
+ std::make_unique<GenericFrameBufferData>(
+ allocDevice_, handle, planes),
+ handle);
}
PUBLIC_FRAME_BUFFER_ALLOCATOR_IMPLEMENTATION
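
The generic allocator now looks up the gralloc module itself with hw_get_module() instead of reusing the camera3 HAL's hw_module_t, and releases the module's shared object on teardown. A minimal sketch of that open/close pairing using plain libhardware calls (helper names are illustrative, error handling trimmed):

    #include <dlfcn.h>

    #include <hardware/gralloc.h>

    static int openGralloc(const hw_module_t **module, alloc_device_t **device)
    {
        int ret = hw_get_module(GRALLOC_HARDWARE_MODULE_ID, module);
        if (ret)
            return ret;

        return gralloc_open(*module, device);
    }

    static void closeGralloc(const hw_module_t *module, alloc_device_t *device)
    {
        if (device)
            gralloc_close(device);

        /* hw_get_module() leaves the module's shared object loaded; drop it. */
        dlclose(module->dso);
    }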
diff --git a/src/android/mm/libhardware_stub.c b/src/android/mm/libhardware_stub.c
new file mode 100644
index 00000000..28faa638
--- /dev/null
+++ b/src/android/mm/libhardware_stub.c
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: Apache-2.0 */
+/*
+ * Copyright (C) 2023, Ideas on Board
+ *
+ * Android libhardware stub for test compilation
+ */
+
+#include <errno.h>
+
+#include <hardware/hardware.h>
+
+int hw_get_module(const char *id __attribute__((__unused__)),
+ const struct hw_module_t **module)
+{
+ *module = NULL;
+ return -ENOTSUP;
+}
diff --git a/src/android/mm/meson.build b/src/android/mm/meson.build
index d40a3b0b..e3e0484c 100644
--- a/src/android/mm/meson.build
+++ b/src/android/mm/meson.build
@@ -4,6 +4,14 @@ platform = get_option('android_platform')
if platform == 'generic'
android_hal_sources += files(['generic_camera_buffer.cpp',
'generic_frame_buffer_allocator.cpp'])
+ android_deps += [libdl]
+
+ libhardware = dependency('libhardware', required : false)
+ if libhardware.found()
+ android_deps += [libhardware]
+ else
+ android_hal_sources += files(['libhardware_stub.c'])
+ endif
elif platform == 'cros'
android_hal_sources += files(['cros_camera_buffer.cpp',
'cros_frame_buffer_allocator.cpp'])
diff --git a/src/android/post_processor.h b/src/android/post_processor.h
index 1a205b05..b504a379 100644
--- a/src/android/post_processor.h
+++ b/src/android/post_processor.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * post_processor.h - CameraStream Post Processing Interface
+ * CameraStream Post Processing Interface
*/
#pragma once
diff --git a/src/android/yuv/post_processor_yuv.cpp b/src/android/yuv/post_processor_yuv.cpp
index ed44e6fe..c998807b 100644
--- a/src/android/yuv/post_processor_yuv.cpp
+++ b/src/android/yuv/post_processor_yuv.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * post_processor_yuv.cpp - Post Processor using libyuv
+ * Post Processor using libyuv
*/
#include "post_processor_yuv.h"
diff --git a/src/android/yuv/post_processor_yuv.h b/src/android/yuv/post_processor_yuv.h
index a7ac17c5..ed7bb1fb 100644
--- a/src/android/yuv/post_processor_yuv.h
+++ b/src/android/yuv/post_processor_yuv.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * post_processor_yuv.h - Post Processor using libyuv
+ * Post Processor using libyuv
*/
#pragma once
diff --git a/src/cam/camera_session.cpp b/src/apps/cam/camera_session.cpp
index 238186a3..f13355ba 100644
--- a/src/cam/camera_session.cpp
+++ b/src/apps/cam/camera_session.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_session.cpp - Camera capture session
+ * Camera capture session
*/
#include <iomanip>
@@ -13,9 +13,11 @@
#include <libcamera/control_ids.h>
#include <libcamera/property_ids.h>
+#include "../common/event_loop.h"
+#include "../common/stream_options.h"
+
#include "camera_session.h"
#include "capture_script.h"
-#include "event_loop.h"
#include "file_sink.h"
#ifdef HAVE_KMS
#include "kms_sink.h"
@@ -24,7 +26,6 @@
#ifdef HAVE_SDL
#include "sdl_sink.h"
#endif
-#include "stream_options.h"
using namespace libcamera;
@@ -54,7 +55,7 @@ CameraSession::CameraSession(CameraManager *cm,
return;
}
- StreamRoles roles = StreamKeyValueParser::roles(options_[OptStream]);
+ std::vector<StreamRole> roles = StreamKeyValueParser::roles(options_[OptStream]);
std::unique_ptr<CameraConfiguration> config =
camera_->generateConfiguration(roles);
@@ -64,6 +65,24 @@ CameraSession::CameraSession(CameraManager *cm,
return;
}
+ if (options_.isSet(OptOrientation)) {
+ std::string orientOpt = options_[OptOrientation].toString();
+ static const std::map<std::string, libcamera::Orientation> orientations{
+ { "rot0", libcamera::Orientation::Rotate0 },
+ { "rot180", libcamera::Orientation::Rotate180 },
+ { "mirror", libcamera::Orientation::Rotate0Mirror },
+ { "flip", libcamera::Orientation::Rotate180Mirror },
+ };
+
+ auto orientation = orientations.find(orientOpt);
+ if (orientation == orientations.end()) {
+ std::cerr << "Invalid orientation " << orientOpt << std::endl;
+ return;
+ }
+
+ config->orientation = orientation->second;
+ }
+
/* Apply configuration if explicitly requested. */
if (StreamKeyValueParser::updateConfiguration(config.get(),
options_[OptStream])) {
@@ -207,10 +226,10 @@ int CameraSession::start()
if (options_.isSet(OptFile)) {
if (!options_[OptFile].toString().empty())
- sink_ = std::make_unique<FileSink>(streamNames_,
+ sink_ = std::make_unique<FileSink>(camera_.get(), streamNames_,
options_[OptFile]);
else
- sink_ = std::make_unique<FileSink>(streamNames_);
+ sink_ = std::make_unique<FileSink>(camera_.get(), streamNames_);
}
if (sink_) {
@@ -358,7 +377,7 @@ void CameraSession::requestComplete(Request *request)
* Defer processing of the completed request to the event loop, to avoid
* blocking the camera manager thread.
*/
- EventLoop::instance()->callLater([=]() { processRequest(request); });
+ EventLoop::instance()->callLater([this, request]() { processRequest(request); });
}
void CameraSession::processRequest(Request *request)
diff --git a/src/cam/camera_session.h b/src/apps/cam/camera_session.h
index d562caae..4442fd9b 100644
--- a/src/cam/camera_session.h
+++ b/src/apps/cam/camera_session.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_session.h - Camera capture session
+ * Camera capture session
*/
#pragma once
@@ -21,7 +21,7 @@
#include <libcamera/request.h>
#include <libcamera/stream.h>
-#include "options.h"
+#include "../common/options.h"
class CaptureScript;
class FrameSink;
diff --git a/src/cam/capture-script.yaml b/src/apps/cam/capture-script.yaml
index 6a749bc6..7118865e 100644
--- a/src/cam/capture-script.yaml
+++ b/src/apps/cam/capture-script.yaml
@@ -4,6 +4,19 @@
#
# A capture script allows to associate a list of controls and their values
# to frame numbers.
+#
+# The script allows defining a list of frames associated with controls
+# and an optional list of properties that can control the script behaviour.
+
+# properties:
+# # Repeat the controls every 'idx' frames.
+# - loop: idx
+#
+# # List of frame number with associated a list of controls to be applied
+# frames:
+# - frame-number:
+# Control1: value1
+# Control2: value2
# \todo Formally define the capture script structure with a schema
@@ -12,10 +25,16 @@
# libcamera::controls:: enumeration
# - Controls not supported by the camera currently operated are ignored
# - Frame numbers shall be monotonically incrementing, gaps are allowed
+# - If a loop limit is specified, frame numbers in the 'frames' list shall be
+# less than the loop control
+
+# Example: Turn brightness up and down every 460 frames
+
+properties:
+ - loop: 460
-# Example:
frames:
- - 1:
+ - 0:
Brightness: 0.0
- 40:
@@ -44,3 +63,9 @@ frames:
- 340:
Brightness: -0.8
+
+ - 380:
+ Brightness: -0.4
+
+ - 420:
+ Brightness: -0.2
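
With the loop property above set to 460, the control list repeats as idx = frame % 460: frame 460 re-applies the frame-0 brightness, frame 500 the frame-40 value, and so on, which is why the frame numbers in the list stay below 460. The modulo lookup itself is implemented in capture_script.cpp just below.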
diff --git a/src/apps/cam/capture_script.cpp b/src/apps/cam/capture_script.cpp
new file mode 100644
index 00000000..fc1dfa75
--- /dev/null
+++ b/src/apps/cam/capture_script.cpp
@@ -0,0 +1,662 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2022, Ideas on Board Oy
+ *
+ * Capture session configuration script
+ */
+
+#include "capture_script.h"
+
+#include <iostream>
+#include <stdio.h>
+#include <stdlib.h>
+
+using namespace libcamera;
+
+CaptureScript::CaptureScript(std::shared_ptr<Camera> camera,
+ const std::string &fileName)
+ : camera_(camera), loop_(0), valid_(false)
+{
+ FILE *fh = fopen(fileName.c_str(), "r");
+ if (!fh) {
+ int ret = -errno;
+ std::cerr << "Failed to open capture script " << fileName
+ << ": " << strerror(-ret) << std::endl;
+ return;
+ }
+
+ /*
+ * Map the camera's controls to their name so that they can be
+ * easily identified when parsing the script file.
+ */
+ for (const auto &[control, info] : camera_->controls())
+ controls_[control->name()] = control;
+
+ int ret = parseScript(fh);
+ fclose(fh);
+ if (ret)
+ return;
+
+ valid_ = true;
+}
+
+/* Retrieve the control list associated with a frame number. */
+const ControlList &CaptureScript::frameControls(unsigned int frame)
+{
+ static ControlList controls{};
+ unsigned int idx = frame;
+
+ /* If we loop, repeat the controls every 'loop_' frames. */
+ if (loop_)
+ idx = frame % loop_;
+
+ auto it = frameControls_.find(idx);
+ if (it == frameControls_.end())
+ return controls;
+
+ return it->second;
+}
+
+CaptureScript::EventPtr CaptureScript::nextEvent(yaml_event_type_t expectedType)
+{
+ EventPtr event(new yaml_event_t);
+
+ if (!yaml_parser_parse(&parser_, event.get()))
+ return nullptr;
+
+ if (expectedType != YAML_NO_EVENT && !checkEvent(event, expectedType))
+ return nullptr;
+
+ return event;
+}
+
+bool CaptureScript::checkEvent(const EventPtr &event, yaml_event_type_t expectedType) const
+{
+ if (event->type != expectedType) {
+ std::cerr << "Capture script error on line " << event->start_mark.line
+ << " column " << event->start_mark.column << ": "
+ << "Expected " << eventTypeName(expectedType)
+ << " event, got " << eventTypeName(event->type)
+ << std::endl;
+ return false;
+ }
+
+ return true;
+}
+
+std::string CaptureScript::eventScalarValue(const EventPtr &event)
+{
+ return std::string(reinterpret_cast<char *>(event->data.scalar.value),
+ event->data.scalar.length);
+}
+
+std::string CaptureScript::eventTypeName(yaml_event_type_t type)
+{
+ static const std::map<yaml_event_type_t, std::string> typeNames = {
+ { YAML_STREAM_START_EVENT, "stream-start" },
+ { YAML_STREAM_END_EVENT, "stream-end" },
+ { YAML_DOCUMENT_START_EVENT, "document-start" },
+ { YAML_DOCUMENT_END_EVENT, "document-end" },
+ { YAML_ALIAS_EVENT, "alias" },
+ { YAML_SCALAR_EVENT, "scalar" },
+ { YAML_SEQUENCE_START_EVENT, "sequence-start" },
+ { YAML_SEQUENCE_END_EVENT, "sequence-end" },
+ { YAML_MAPPING_START_EVENT, "mapping-start" },
+ { YAML_MAPPING_END_EVENT, "mapping-end" },
+ };
+
+ auto it = typeNames.find(type);
+ if (it == typeNames.end())
+ return "[type " + std::to_string(type) + "]";
+
+ return it->second;
+}
+
+int CaptureScript::parseScript(FILE *script)
+{
+ int ret = yaml_parser_initialize(&parser_);
+ if (!ret) {
+ std::cerr << "Failed to initialize yaml parser" << std::endl;
+ return ret;
+ }
+
+ /* Delete the parser upon function exit. */
+ struct ParserDeleter {
+ ParserDeleter(yaml_parser_t *parser) : parser_(parser) { }
+ ~ParserDeleter() { yaml_parser_delete(parser_); }
+ yaml_parser_t *parser_;
+ } deleter(&parser_);
+
+ yaml_parser_set_input_file(&parser_, script);
+
+ EventPtr event = nextEvent(YAML_STREAM_START_EVENT);
+ if (!event)
+ return -EINVAL;
+
+ event = nextEvent(YAML_DOCUMENT_START_EVENT);
+ if (!event)
+ return -EINVAL;
+
+ event = nextEvent(YAML_MAPPING_START_EVENT);
+ if (!event)
+ return -EINVAL;
+
+ while (1) {
+ event = nextEvent();
+ if (!event)
+ return -EINVAL;
+
+ if (event->type == YAML_MAPPING_END_EVENT)
+ return 0;
+
+ if (!checkEvent(event, YAML_SCALAR_EVENT))
+ return -EINVAL;
+
+ std::string section = eventScalarValue(event);
+
+ if (section == "properties") {
+ ret = parseProperties();
+ if (ret)
+ return ret;
+ } else if (section == "frames") {
+ ret = parseFrames();
+ if (ret)
+ return ret;
+ } else {
+ std::cerr << "Unsupported section '" << section << "'"
+ << std::endl;
+ return -EINVAL;
+ }
+ }
+}
+
+int CaptureScript::parseProperty()
+{
+ EventPtr event = nextEvent(YAML_MAPPING_START_EVENT);
+ if (!event)
+ return -EINVAL;
+
+ std::string prop = parseScalar();
+ if (prop.empty())
+ return -EINVAL;
+
+ if (prop == "loop") {
+ event = nextEvent();
+ if (!event)
+ return -EINVAL;
+
+ std::string value = eventScalarValue(event);
+ if (value.empty())
+ return -EINVAL;
+
+ loop_ = atoi(value.c_str());
+ if (!loop_) {
+ std::cerr << "Invalid loop limit '" << loop_ << "'"
+ << std::endl;
+ return -EINVAL;
+ }
+ } else {
+ std::cerr << "Unsupported property '" << prop << "'" << std::endl;
+ return -EINVAL;
+ }
+
+ event = nextEvent(YAML_MAPPING_END_EVENT);
+ if (!event)
+ return -EINVAL;
+
+ return 0;
+}
+
+int CaptureScript::parseProperties()
+{
+ EventPtr event = nextEvent(YAML_SEQUENCE_START_EVENT);
+ if (!event)
+ return -EINVAL;
+
+ while (1) {
+ if (event->type == YAML_SEQUENCE_END_EVENT)
+ return 0;
+
+ int ret = parseProperty();
+ if (ret)
+ return ret;
+
+ event = nextEvent();
+ if (!event)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int CaptureScript::parseFrames()
+{
+ EventPtr event = nextEvent(YAML_SEQUENCE_START_EVENT);
+ if (!event)
+ return -EINVAL;
+
+ while (1) {
+ event = nextEvent();
+ if (!event)
+ return -EINVAL;
+
+ if (event->type == YAML_SEQUENCE_END_EVENT)
+ return 0;
+
+ int ret = parseFrame(std::move(event));
+ if (ret)
+ return ret;
+ }
+}
+
+int CaptureScript::parseFrame(EventPtr event)
+{
+ if (!checkEvent(event, YAML_MAPPING_START_EVENT))
+ return -EINVAL;
+
+ std::string key = parseScalar();
+ if (key.empty())
+ return -EINVAL;
+
+ unsigned int frameId = atoi(key.c_str());
+ if (loop_ && frameId >= loop_) {
+ std::cerr
+ << "Frame id (" << frameId << ") shall be smaller than"
+ << "loop limit (" << loop_ << ")" << std::endl;
+ return -EINVAL;
+ }
+
+ event = nextEvent(YAML_MAPPING_START_EVENT);
+ if (!event)
+ return -EINVAL;
+
+ ControlList controls{};
+
+ while (1) {
+ event = nextEvent();
+ if (!event)
+ return -EINVAL;
+
+ if (event->type == YAML_MAPPING_END_EVENT)
+ break;
+
+ int ret = parseControl(std::move(event), controls);
+ if (ret)
+ return ret;
+ }
+
+ frameControls_[frameId] = std::move(controls);
+
+ event = nextEvent(YAML_MAPPING_END_EVENT);
+ if (!event)
+ return -EINVAL;
+
+ return 0;
+}
+
+int CaptureScript::parseControl(EventPtr event, ControlList &controls)
+{
+ /* We expect a value after a key. */
+ std::string name = eventScalarValue(event);
+ if (name.empty())
+ return -EINVAL;
+
+ /* Fail if the camera does not support the requested control. */
+ auto it = controls_.find(name);
+ if (it == controls_.end()) {
+ std::cerr << "Unsupported control '" << name << "'" << std::endl;
+ return -EINVAL;
+ }
+
+ const ControlId *controlId = it->second;
+
+ ControlValue val = unpackControl(controlId);
+ if (val.isNone()) {
+ std::cerr << "Error unpacking control '" << name << "'"
+ << std::endl;
+ return -EINVAL;
+ }
+
+ controls.set(controlId->id(), val);
+
+ return 0;
+}
+
+std::string CaptureScript::parseScalar()
+{
+ EventPtr event = nextEvent(YAML_SCALAR_EVENT);
+ if (!event)
+ return "";
+
+ return eventScalarValue(event);
+}
+
+ControlValue CaptureScript::parseRectangles()
+{
+ std::vector<libcamera::Rectangle> rectangles;
+
+ std::vector<std::vector<std::string>> arrays = parseArrays();
+ if (arrays.empty())
+ return {};
+
+ for (const std::vector<std::string> &values : arrays) {
+ if (values.size() != 4) {
+ std::cerr << "Error parsing Rectangle: expected "
+ << "array with 4 parameters" << std::endl;
+ return {};
+ }
+
+ Rectangle rect = unpackRectangle(values);
+ rectangles.push_back(rect);
+ }
+
+ ControlValue controlValue;
+ if (rectangles.size() == 1)
+ controlValue.set(rectangles.at(0));
+ else
+ controlValue.set(Span<const Rectangle>(rectangles));
+
+ return controlValue;
+}
+
+std::vector<std::vector<std::string>> CaptureScript::parseArrays()
+{
+ EventPtr event = nextEvent(YAML_SEQUENCE_START_EVENT);
+ if (!event)
+ return {};
+
+ event = nextEvent();
+ if (!event)
+ return {};
+
+ std::vector<std::vector<std::string>> valueArrays;
+
+ /* Parse single array. */
+ if (event->type == YAML_SCALAR_EVENT) {
+ std::string firstValue = eventScalarValue(event);
+ if (firstValue.empty())
+ return {};
+
+ std::vector<std::string> remaining = parseSingleArray();
+
+ std::vector<std::string> values = { firstValue };
+ values.insert(std::end(values),
+ std::begin(remaining), std::end(remaining));
+ valueArrays.push_back(values);
+
+ return valueArrays;
+ }
+
+ /* Parse array of arrays. */
+ while (1) {
+ switch (event->type) {
+ case YAML_SEQUENCE_START_EVENT: {
+ std::vector<std::string> values = parseSingleArray();
+ valueArrays.push_back(values);
+ break;
+ }
+ case YAML_SEQUENCE_END_EVENT:
+ return valueArrays;
+ default:
+ return {};
+ }
+
+ event = nextEvent();
+ if (!event)
+ return {};
+ }
+}
+
+std::vector<std::string> CaptureScript::parseSingleArray()
+{
+ std::vector<std::string> values;
+
+ while (1) {
+ EventPtr event = nextEvent();
+ if (!event)
+ return {};
+
+ switch (event->type) {
+ case YAML_SCALAR_EVENT: {
+ std::string value = eventScalarValue(event);
+ if (value.empty())
+ return {};
+ values.push_back(value);
+ break;
+ }
+ case YAML_SEQUENCE_END_EVENT:
+ return values;
+ default:
+ return {};
+ }
+ }
+}
+
+void CaptureScript::unpackFailure(const ControlId *id, const std::string &repr)
+{
+ static const std::map<unsigned int, const char *> typeNames = {
+ { ControlTypeNone, "none" },
+ { ControlTypeBool, "bool" },
+ { ControlTypeByte, "byte" },
+ { ControlTypeInteger32, "int32" },
+ { ControlTypeInteger64, "int64" },
+ { ControlTypeFloat, "float" },
+ { ControlTypeString, "string" },
+ { ControlTypeRectangle, "Rectangle" },
+ { ControlTypeSize, "Size" },
+ };
+
+ const char *typeName;
+ auto it = typeNames.find(id->type());
+ if (it != typeNames.end())
+ typeName = it->second;
+ else
+ typeName = "unknown";
+
+ std::cerr << "Unsupported control '" << repr << "' for "
+ << typeName << " control " << id->name() << std::endl;
+}
+
+ControlValue CaptureScript::parseScalarControl(const ControlId *id,
+ const std::string repr)
+{
+ ControlValue value{};
+
+ switch (id->type()) {
+ case ControlTypeNone:
+ break;
+ case ControlTypeBool: {
+ bool val;
+
+ if (repr == "true") {
+ val = true;
+ } else if (repr == "false") {
+ val = false;
+ } else {
+ unpackFailure(id, repr);
+ return value;
+ }
+
+ value.set<bool>(val);
+ break;
+ }
+ case ControlTypeByte: {
+ uint8_t val = strtol(repr.c_str(), NULL, 10);
+ value.set<uint8_t>(val);
+ break;
+ }
+ case ControlTypeInteger32: {
+ int32_t val = strtol(repr.c_str(), NULL, 10);
+ value.set<int32_t>(val);
+ break;
+ }
+ case ControlTypeInteger64: {
+ int64_t val = strtoll(repr.c_str(), NULL, 10);
+ value.set<int64_t>(val);
+ break;
+ }
+ case ControlTypeFloat: {
+ float val = strtof(repr.c_str(), NULL);
+ value.set<float>(val);
+ break;
+ }
+ case ControlTypeString: {
+ value.set<std::string>(repr);
+ break;
+ }
+ default:
+ std::cerr << "Unsupported control type" << std::endl;
+ break;
+ }
+
+ return value;
+}
+
+ControlValue CaptureScript::parseArrayControl(const ControlId *id,
+ const std::vector<std::string> &repr)
+{
+ ControlValue value{};
+
+ switch (id->type()) {
+ case ControlTypeNone:
+ break;
+ case ControlTypeBool: {
+ /*
+ * This is unpleasant, but we cannot use an std::vector<> as its
+ * boolean type overload does not allow access to the raw data,
+ * as boolean values are stored in a bitmask for efficiency.
+ *
+ * As we need a contiguous memory region to wrap in a Span<>,
+ * use an array instead but be strict about not overflowing it
+ * by limiting the number of controls we can store.
+ *
+ * Be loud but do not fail, as the issue would only show up at
+ * runtime and it's not fatal.
+ */
+ static constexpr unsigned int kMaxNumBooleanControls = 1024;
+ std::array<bool, kMaxNumBooleanControls> values;
+ unsigned int idx = 0;
+
+ for (const std::string &s : repr) {
+ bool val;
+
+ if (s == "true") {
+ val = true;
+ } else if (s == "false") {
+ val = false;
+ } else {
+ unpackFailure(id, s);
+ return value;
+ }
+
+ if (idx == kMaxNumBooleanControls) {
+ std::cerr << "Cannot parse more than "
+ << kMaxNumBooleanControls
+ << " boolean controls" << std::endl;
+ break;
+ }
+
+ values[idx++] = val;
+ }
+
+ value = Span<bool>(values.data(), idx);
+ break;
+ }
+ case ControlTypeByte: {
+ std::vector<uint8_t> values;
+ for (const std::string &s : repr) {
+ uint8_t val = strtoll(s.c_str(), NULL, 10);
+ values.push_back(val);
+ }
+
+ value = Span<const uint8_t>(values.data(), values.size());
+ break;
+ }
+ case ControlTypeInteger32: {
+ std::vector<int32_t> values;
+ for (const std::string &s : repr) {
+ int32_t val = strtoll(s.c_str(), NULL, 10);
+ values.push_back(val);
+ }
+
+ value = Span<const int32_t>(values.data(), values.size());
+ break;
+ }
+ case ControlTypeInteger64: {
+ std::vector<int64_t> values;
+ for (const std::string &s : repr) {
+ int64_t val = strtoll(s.c_str(), NULL, 10);
+ values.push_back(val);
+ }
+
+ value = Span<const int64_t>(values.data(), values.size());
+ break;
+ }
+ case ControlTypeFloat: {
+ std::vector<float> values;
+ for (const std::string &s : repr)
+ values.push_back(strtof(s.c_str(), NULL));
+
+ value = Span<const float>(values.data(), values.size());
+ break;
+ }
+ case ControlTypeString: {
+ value = Span<const std::string>(repr.data(), repr.size());
+ break;
+ }
+ default:
+ std::cerr << "Unsupported control type" << std::endl;
+ break;
+ }
+
+ return value;
+}
+
+ControlValue CaptureScript::unpackControl(const ControlId *id)
+{
+ /* Parse complex types. */
+ switch (id->type()) {
+ case ControlTypeRectangle:
+ return parseRectangles();
+ case ControlTypeSize:
+ /* \todo Parse Sizes. */
+ return {};
+ default:
+ break;
+ }
+
+ /* Check if the control has a single scalar value or is an array. */
+ EventPtr event = nextEvent();
+ if (!event)
+ return {};
+
+ switch (event->type) {
+ case YAML_SCALAR_EVENT: {
+ const std::string repr = eventScalarValue(event);
+ if (repr.empty())
+ return {};
+
+ return parseScalarControl(id, repr);
+ }
+ case YAML_SEQUENCE_START_EVENT: {
+ std::vector<std::string> array = parseSingleArray();
+ if (array.empty())
+ return {};
+
+ return parseArrayControl(id, array);
+ }
+ default:
+ std::cerr << "Unexpected event type: " << event->type << std::endl;
+ return {};
+ }
+}
+
+libcamera::Rectangle CaptureScript::unpackRectangle(const std::vector<std::string> &strVec)
+{
+ int x = strtol(strVec[0].c_str(), NULL, 10);
+ int y = strtol(strVec[1].c_str(), NULL, 10);
+ unsigned int width = strtoul(strVec[2].c_str(), NULL, 10);
+ unsigned int height = strtoul(strVec[3].c_str(), NULL, 10);
+
+ return Rectangle(x, y, width, height);
+}
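A minimal sketch of how a capture loop might consume this class; the
camera_session integration is not part of this patch, so script_, camera_,
queueCount_ and request (a std::unique_ptr<libcamera::Request>) are assumed
names used only for illustration:

	/* Apply the script's per-frame controls before queueing the request.
	 * frameControls() handles the 'loop' wrap-around internally. */
	if (script_)
		request->controls() = script_->frameControls(queueCount_);
	queueCount_++;
	camera_->queueRequest(request.get());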
diff --git a/src/cam/capture_script.h b/src/apps/cam/capture_script.h
index 8b4f8f62..294b9203 100644
--- a/src/cam/capture_script.h
+++ b/src/apps/cam/capture_script.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022, Ideas on Board Oy
*
- * capture_script.h - Capture session configuration script
+ * Capture session configuration script
*/
#pragma once
@@ -40,6 +40,7 @@ private:
std::map<unsigned int, libcamera::ControlList> frameControls_;
std::shared_ptr<libcamera::Camera> camera_;
yaml_parser_t parser_;
+ unsigned int loop_;
bool valid_;
EventPtr nextEvent(yaml_event_type_t expectedType = YAML_NO_EVENT);
@@ -49,14 +50,24 @@ private:
int parseScript(FILE *script);
+ int parseProperties();
+ int parseProperty();
int parseFrames();
int parseFrame(EventPtr event);
int parseControl(EventPtr event, libcamera::ControlList &controls);
+ libcamera::ControlValue parseScalarControl(const libcamera::ControlId *id,
+ const std::string repr);
+ libcamera::ControlValue parseArrayControl(const libcamera::ControlId *id,
+ const std::vector<std::string> &repr);
+
std::string parseScalar();
+ libcamera::ControlValue parseRectangles();
+ std::vector<std::vector<std::string>> parseArrays();
+ std::vector<std::string> parseSingleArray();
void unpackFailure(const libcamera::ControlId *id,
const std::string &repr);
- libcamera::ControlValue unpackControl(const libcamera::ControlId *id,
- const std::string &repr);
+ libcamera::ControlValue unpackControl(const libcamera::ControlId *id);
+ libcamera::Rectangle unpackRectangle(const std::vector<std::string> &strVec);
};
diff --git a/src/cam/drm.cpp b/src/apps/cam/drm.cpp
index fbfc0a59..47bbb6b0 100644
--- a/src/cam/drm.cpp
+++ b/src/apps/cam/drm.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Ideas on Board Oy
*
- * drm.cpp - DRM/KMS Helpers
+ * DRM/KMS Helpers
*/
#include "drm.h"
@@ -24,7 +24,7 @@
#include <libdrm/drm_mode.h>
-#include "event_loop.h"
+#include "../common/event_loop.h"
namespace DRM {
@@ -377,6 +377,8 @@ int AtomicRequest::commit(unsigned int flags)
drmFlags |= DRM_MODE_ATOMIC_ALLOW_MODESET;
if (flags & FlagAsync)
drmFlags |= DRM_MODE_PAGE_FLIP_EVENT | DRM_MODE_ATOMIC_NONBLOCK;
+ if (flags & FlagTestOnly)
+ drmFlags |= DRM_MODE_ATOMIC_TEST_ONLY;
return drmModeAtomicCommit(dev_->fd(), request_, drmFlags, this);
}
@@ -428,7 +430,8 @@ int Device::init()
int Device::openCard()
{
const std::string dirName = "/dev/dri/";
- int ret = -ENOENT;
+ bool found = false;
+ int ret;
/*
* Open the first DRM/KMS device beginning with /dev/dri/card. The
@@ -447,24 +450,42 @@ int Device::openCard()
}
for (struct dirent *res; (res = readdir(folder));) {
+ uint64_t cap;
+
if (strncmp(res->d_name, "card", 4))
continue;
const std::string devName = dirName + res->d_name;
fd_ = open(devName.c_str(), O_RDWR | O_CLOEXEC);
- if (fd_ >= 0) {
- ret = 0;
- break;
+ if (fd_ < 0) {
+ ret = -errno;
+ std::cerr << "Failed to open DRM/KMS device " << devName << ": "
+ << strerror(-ret) << std::endl;
+ continue;
}
- ret = -errno;
- std::cerr << "Failed to open DRM/KMS device " << devName << ": "
- << strerror(-ret) << std::endl;
+ /*
+ * Skip devices that don't support the modeset API, to avoid
+ * selecting a DRM device corresponding to a GPU. There is no
+ * modeset capability, but the kernel returns an error for most
+ * caps if mode setting isn't support by the driver. The
+ * DRM_CAP_DUMB_BUFFER capability is one of those, other would
+ * do as well. The capability value itself isn't relevant.
+ */
+ ret = drmGetCap(fd_, DRM_CAP_DUMB_BUFFER, &cap);
+ if (ret < 0) {
+ drmClose(fd_);
+ fd_ = -1;
+ continue;
+ }
+
+ found = true;
+ break;
}
closedir(folder);
- return ret;
+ return found ? 0 : -ENOENT;
}
int Device::getResources()
diff --git a/src/cam/drm.h b/src/apps/cam/drm.h
index 655a7509..1ba83b6e 100644
--- a/src/cam/drm.h
+++ b/src/apps/cam/drm.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Ideas on Board Oy
*
- * drm.h - DRM/KMS Helpers
+ * DRM/KMS Helpers
*/
#pragma once
@@ -251,6 +251,7 @@ public:
enum Flags {
FlagAllowModeset = (1 << 0),
FlagAsync = (1 << 1),
+ FlagTestOnly = (1 << 2),
};
AtomicRequest(Device *dev);
diff --git a/src/cam/file_sink.cpp b/src/apps/cam/file_sink.cpp
index 45213d4a..3e000d2f 100644
--- a/src/cam/file_sink.cpp
+++ b/src/apps/cam/file_sink.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * file_sink.cpp - File Sink
+ * File Sink
*/
#include <assert.h>
@@ -15,14 +15,22 @@
#include <libcamera/camera.h>
+#include "../common/dng_writer.h"
+#include "../common/image.h"
+#include "../common/ppm_writer.h"
+
#include "file_sink.h"
-#include "image.h"
using namespace libcamera;
-FileSink::FileSink(const std::map<const libcamera::Stream *, std::string> &streamNames,
+FileSink::FileSink([[maybe_unused]] const libcamera::Camera *camera,
+ const std::map<const libcamera::Stream *, std::string> &streamNames,
const std::string &pattern)
- : streamNames_(streamNames), pattern_(pattern)
+ :
+#ifdef HAVE_TIFF
+ camera_(camera),
+#endif
+ streamNames_(streamNames), pattern_(pattern)
{
}
@@ -51,12 +59,13 @@ void FileSink::mapBuffer(FrameBuffer *buffer)
bool FileSink::processRequest(Request *request)
{
for (auto [stream, buffer] : request->buffers())
- writeBuffer(stream, buffer);
+ writeBuffer(stream, buffer, request->metadata());
return true;
}
-void FileSink::writeBuffer(const Stream *stream, FrameBuffer *buffer)
+void FileSink::writeBuffer(const Stream *stream, FrameBuffer *buffer,
+ [[maybe_unused]] const ControlList &metadata)
{
std::string filename;
size_t pos;
@@ -65,6 +74,11 @@ void FileSink::writeBuffer(const Stream *stream, FrameBuffer *buffer)
if (!pattern_.empty())
filename = pattern_;
+#ifdef HAVE_TIFF
+ bool dng = filename.find(".dng", filename.size() - 4) != std::string::npos;
+#endif /* HAVE_TIFF */
+ bool ppm = filename.find(".ppm", filename.size() - 4) != std::string::npos;
+
if (filename.empty() || filename.back() == '/')
filename += "frame-#.bin";
@@ -76,6 +90,30 @@ void FileSink::writeBuffer(const Stream *stream, FrameBuffer *buffer)
filename.replace(pos, 1, ss.str());
}
+ Image *image = mappedBuffers_[buffer].get();
+
+#ifdef HAVE_TIFF
+ if (dng) {
+ ret = DNGWriter::write(filename.c_str(), camera_,
+ stream->configuration(), metadata,
+ buffer, image->data(0).data());
+ if (ret < 0)
+ std::cerr << "failed to write DNG file `" << filename
+ << "'" << std::endl;
+
+ return;
+ }
+#endif /* HAVE_TIFF */
+ if (ppm) {
+ ret = PPMWriter::write(filename.c_str(), stream->configuration(),
+ image->data(0));
+ if (ret < 0)
+ std::cerr << "failed to write PPM file `" << filename
+ << "'" << std::endl;
+
+ return;
+ }
+
fd = open(filename.c_str(), O_CREAT | O_WRONLY |
(pos == std::string::npos ? O_APPEND : O_TRUNC),
S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH);
@@ -86,16 +124,19 @@ void FileSink::writeBuffer(const Stream *stream, FrameBuffer *buffer)
return;
}
- Image *image = mappedBuffers_[buffer].get();
-
for (unsigned int i = 0; i < buffer->planes().size(); ++i) {
- const FrameMetadata::Plane &meta = buffer->metadata().planes()[i];
+ /*
+ * This was formerly a local "const FrameMetadata::Plane &",
+ * but it causes a false positive dangling-reference warning
+ * on gcc 13.
+ */
+ const unsigned int bytesused = buffer->metadata().planes()[i].bytesused;
Span<uint8_t> data = image->data(i);
- unsigned int length = std::min<unsigned int>(meta.bytesused, data.size());
+ const unsigned int length = std::min<unsigned int>(bytesused, data.size());
- if (meta.bytesused > data.size())
- std::cerr << "payload size " << meta.bytesused
+ if (bytesused > data.size())
+ std::cerr << "payload size " << bytesused
<< " larger than plane size " << data.size()
<< std::endl;
diff --git a/src/cam/file_sink.h b/src/apps/cam/file_sink.h
index 067736f5..9d560783 100644
--- a/src/cam/file_sink.h
+++ b/src/apps/cam/file_sink.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * file_sink.h - File Sink
+ * File Sink
*/
#pragma once
@@ -20,7 +20,8 @@ class Image;
class FileSink : public FrameSink
{
public:
- FileSink(const std::map<const libcamera::Stream *, std::string> &streamNames,
+ FileSink(const libcamera::Camera *camera,
+ const std::map<const libcamera::Stream *, std::string> &streamNames,
const std::string &pattern = "");
~FileSink();
@@ -32,8 +33,12 @@ public:
private:
void writeBuffer(const libcamera::Stream *stream,
- libcamera::FrameBuffer *buffer);
+ libcamera::FrameBuffer *buffer,
+ const libcamera::ControlList &metadata);
+#ifdef HAVE_TIFF
+ const libcamera::Camera *camera_;
+#endif
std::map<const libcamera::Stream *, std::string> streamNames_;
std::string pattern_;
std::map<libcamera::FrameBuffer *, std::unique_ptr<Image>> mappedBuffers_;
diff --git a/src/cam/frame_sink.cpp b/src/apps/cam/frame_sink.cpp
index af21d575..68d6f2c1 100644
--- a/src/cam/frame_sink.cpp
+++ b/src/apps/cam/frame_sink.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Ideas on Board Oy
*
- * frame_sink.cpp - Base Frame Sink Class
+ * Base Frame Sink Class
*/
#include "frame_sink.h"
diff --git a/src/cam/frame_sink.h b/src/apps/cam/frame_sink.h
index ca4347cb..11105c6c 100644
--- a/src/cam/frame_sink.h
+++ b/src/apps/cam/frame_sink.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Ideas on Board Oy
*
- * frame_sink.h - Base Frame Sink Class
+ * Base Frame Sink Class
*/
#pragma once
diff --git a/src/cam/kms_sink.cpp b/src/apps/cam/kms_sink.cpp
index 37a3bd50..672c985a 100644
--- a/src/cam/kms_sink.cpp
+++ b/src/apps/cam/kms_sink.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Ideas on Board Oy
*
- * kms_sink.cpp - KMS Sink
+ * KMS Sink
*/
#include "kms_sink.h"
@@ -149,6 +149,81 @@ int KMSSink::configure(const libcamera::CameraConfiguration &config)
size_ = cfg.size;
stride_ = cfg.stride;
+ /* Configure color space. */
+ colorEncoding_ = std::nullopt;
+ colorRange_ = std::nullopt;
+
+ if (cfg.colorSpace->ycbcrEncoding == libcamera::ColorSpace::YcbcrEncoding::None)
+ return 0;
+
+ /*
+ * The encoding and range enums are defined in the kernel but not
+ * exposed in public headers.
+ */
+ enum drm_color_encoding {
+ DRM_COLOR_YCBCR_BT601,
+ DRM_COLOR_YCBCR_BT709,
+ DRM_COLOR_YCBCR_BT2020,
+ };
+
+ enum drm_color_range {
+ DRM_COLOR_YCBCR_LIMITED_RANGE,
+ DRM_COLOR_YCBCR_FULL_RANGE,
+ };
+
+ const DRM::Property *colorEncoding = plane_->property("COLOR_ENCODING");
+ const DRM::Property *colorRange = plane_->property("COLOR_RANGE");
+
+ if (colorEncoding) {
+ drm_color_encoding encoding;
+
+ switch (cfg.colorSpace->ycbcrEncoding) {
+ case libcamera::ColorSpace::YcbcrEncoding::Rec601:
+ default:
+ encoding = DRM_COLOR_YCBCR_BT601;
+ break;
+ case libcamera::ColorSpace::YcbcrEncoding::Rec709:
+ encoding = DRM_COLOR_YCBCR_BT709;
+ break;
+ case libcamera::ColorSpace::YcbcrEncoding::Rec2020:
+ encoding = DRM_COLOR_YCBCR_BT2020;
+ break;
+ }
+
+ for (const auto &[id, name] : colorEncoding->enums()) {
+ if (id == encoding) {
+ colorEncoding_ = encoding;
+ break;
+ }
+ }
+ }
+
+ if (colorRange) {
+ drm_color_range range;
+
+ switch (cfg.colorSpace->range) {
+ case libcamera::ColorSpace::Range::Limited:
+ default:
+ range = DRM_COLOR_YCBCR_LIMITED_RANGE;
+ break;
+ case libcamera::ColorSpace::Range::Full:
+ range = DRM_COLOR_YCBCR_FULL_RANGE;
+ break;
+ }
+
+ for (const auto &[id, name] : colorRange->enums()) {
+ if (id == range) {
+ colorRange_ = range;
+ break;
+ }
+ }
+ }
+
+ if (!colorEncoding_ || !colorRange_)
+ std::cerr << "Color space " << cfg.colorSpace->toString()
+ << " not supported by the display device."
+ << " Colors may be wrong." << std::endl;
+
return 0;
}
@@ -228,24 +303,22 @@ int KMSSink::configurePipeline(const libcamera::PixelFormat &format)
int KMSSink::start()
{
- std::unique_ptr<DRM::AtomicRequest> request;
-
int ret = FrameSink::start();
if (ret < 0)
return ret;
/* Disable all CRTCs and planes to start from a known valid state. */
- request = std::make_unique<DRM::AtomicRequest>(&dev_);
+ DRM::AtomicRequest request(&dev_);
for (const DRM::Crtc &crtc : dev_.crtcs())
- request->addProperty(&crtc, "ACTIVE", 0);
+ request.addProperty(&crtc, "ACTIVE", 0);
for (const DRM::Plane &plane : dev_.planes()) {
- request->addProperty(&plane, "CRTC_ID", 0);
- request->addProperty(&plane, "FB_ID", 0);
+ request.addProperty(&plane, "CRTC_ID", 0);
+ request.addProperty(&plane, "FB_ID", 0);
}
- ret = request->commit(DRM::AtomicRequest::FlagAllowModeset);
+ ret = request.commit(DRM::AtomicRequest::FlagAllowModeset);
if (ret < 0) {
std::cerr
<< "Failed to disable CRTCs and planes: "
@@ -284,6 +357,94 @@ int KMSSink::stop()
return FrameSink::stop();
}
+bool KMSSink::testModeSet(DRM::FrameBuffer *drmBuffer,
+ const libcamera::Rectangle &src,
+ const libcamera::Rectangle &dst)
+{
+ DRM::AtomicRequest drmRequest{ &dev_ };
+
+ drmRequest.addProperty(connector_, "CRTC_ID", crtc_->id());
+
+ drmRequest.addProperty(crtc_, "ACTIVE", 1);
+ drmRequest.addProperty(crtc_, "MODE_ID", mode_->toBlob(&dev_));
+
+ drmRequest.addProperty(plane_, "CRTC_ID", crtc_->id());
+ drmRequest.addProperty(plane_, "FB_ID", drmBuffer->id());
+ drmRequest.addProperty(plane_, "SRC_X", src.x << 16);
+ drmRequest.addProperty(plane_, "SRC_Y", src.y << 16);
+ drmRequest.addProperty(plane_, "SRC_W", src.width << 16);
+ drmRequest.addProperty(plane_, "SRC_H", src.height << 16);
+ drmRequest.addProperty(plane_, "CRTC_X", dst.x);
+ drmRequest.addProperty(plane_, "CRTC_Y", dst.y);
+ drmRequest.addProperty(plane_, "CRTC_W", dst.width);
+ drmRequest.addProperty(plane_, "CRTC_H", dst.height);
+
+ return !drmRequest.commit(DRM::AtomicRequest::FlagAllowModeset |
+ DRM::AtomicRequest::FlagTestOnly);
+}
+
+bool KMSSink::setupComposition(DRM::FrameBuffer *drmBuffer)
+{
+ /*
+ * Test composition options, from most to least desirable, to select the
+ * best one.
+ */
+ const libcamera::Rectangle framebuffer{ size_ };
+ const libcamera::Rectangle display{ 0, 0, mode_->hdisplay, mode_->vdisplay };
+
+ /* 1. Scale the frame buffer to full screen, preserving aspect ratio. */
+ libcamera::Rectangle src = framebuffer;
+ libcamera::Rectangle dst = display.size().boundedToAspectRatio(framebuffer.size())
+ .centeredTo(display.center());
+
+ if (testModeSet(drmBuffer, src, dst)) {
+ std::cout << "KMS: full-screen scaled output, square pixels"
+ << std::endl;
+ src_ = src;
+ dst_ = dst;
+ return true;
+ }
+
+ /*
+ * 2. Scale the frame buffer to full screen, without preserving aspect
+ * ratio.
+ */
+ src = framebuffer;
+ dst = display;
+
+ if (testModeSet(drmBuffer, src, dst)) {
+ std::cout << "KMS: full-screen scaled output, non-square pixels"
+ << std::endl;
+ src_ = src;
+ dst_ = dst;
+ return true;
+ }
+
+ /* 3. Center the frame buffer on the display. */
+ src = display.size().centeredTo(framebuffer.center()).boundedTo(framebuffer);
+ dst = framebuffer.size().centeredTo(display.center()).boundedTo(display);
+
+ if (testModeSet(drmBuffer, src, dst)) {
+ std::cout << "KMS: centered output" << std::endl;
+ src_ = src;
+ dst_ = dst;
+ return true;
+ }
+
+ /* 4. Align the frame buffer on the top-left of the display. */
+ src = framebuffer.boundedTo(display);
+ dst = display.boundedTo(framebuffer);
+
+ if (testModeSet(drmBuffer, src, dst)) {
+ std::cout << "KMS: top-left aligned output" << std::endl;
+ src_ = src;
+ dst_ = dst;
+ return true;
+ }
+
+ return false;
+}
+
bool KMSSink::processRequest(libcamera::Request *camRequest)
{
/*
@@ -301,35 +462,46 @@ bool KMSSink::processRequest(libcamera::Request *camRequest)
DRM::FrameBuffer *drmBuffer = iter->second.get();
unsigned int flags = DRM::AtomicRequest::FlagAsync;
- DRM::AtomicRequest *drmRequest = new DRM::AtomicRequest(&dev_);
+ std::unique_ptr<DRM::AtomicRequest> drmRequest =
+ std::make_unique<DRM::AtomicRequest>(&dev_);
drmRequest->addProperty(plane_, "FB_ID", drmBuffer->id());
if (!active_ && !queued_) {
/* Enable the display pipeline on the first frame. */
+ if (!setupComposition(drmBuffer)) {
+ std::cerr << "Failed to setup composition" << std::endl;
+ return true;
+ }
+
drmRequest->addProperty(connector_, "CRTC_ID", crtc_->id());
drmRequest->addProperty(crtc_, "ACTIVE", 1);
drmRequest->addProperty(crtc_, "MODE_ID", mode_->toBlob(&dev_));
drmRequest->addProperty(plane_, "CRTC_ID", crtc_->id());
- drmRequest->addProperty(plane_, "SRC_X", 0 << 16);
- drmRequest->addProperty(plane_, "SRC_Y", 0 << 16);
- drmRequest->addProperty(plane_, "SRC_W", size_.width << 16);
- drmRequest->addProperty(plane_, "SRC_H", size_.height << 16);
- drmRequest->addProperty(plane_, "CRTC_X", 0);
- drmRequest->addProperty(plane_, "CRTC_Y", 0);
- drmRequest->addProperty(plane_, "CRTC_W", size_.width);
- drmRequest->addProperty(plane_, "CRTC_H", size_.height);
+ drmRequest->addProperty(plane_, "SRC_X", src_.x << 16);
+ drmRequest->addProperty(plane_, "SRC_Y", src_.y << 16);
+ drmRequest->addProperty(plane_, "SRC_W", src_.width << 16);
+ drmRequest->addProperty(plane_, "SRC_H", src_.height << 16);
+ drmRequest->addProperty(plane_, "CRTC_X", dst_.x);
+ drmRequest->addProperty(plane_, "CRTC_Y", dst_.y);
+ drmRequest->addProperty(plane_, "CRTC_W", dst_.width);
+ drmRequest->addProperty(plane_, "CRTC_H", dst_.height);
+
+ if (colorEncoding_)
+ drmRequest->addProperty(plane_, "COLOR_ENCODING", *colorEncoding_);
+ if (colorRange_)
+ drmRequest->addProperty(plane_, "COLOR_RANGE", *colorRange_);
flags |= DRM::AtomicRequest::FlagAllowModeset;
}
- pending_ = std::make_unique<Request>(drmRequest, camRequest);
+ pending_ = std::make_unique<Request>(std::move(drmRequest), camRequest);
std::lock_guard<std::mutex> lock(lock_);
if (!queued_) {
- int ret = drmRequest->commit(flags);
+ int ret = pending_->drmRequest_->commit(flags);
if (ret < 0) {
std::cerr
<< "Failed to commit atomic request: "
@@ -343,7 +515,7 @@ bool KMSSink::processRequest(libcamera::Request *camRequest)
return false;
}
-void KMSSink::requestComplete(DRM::AtomicRequest *request)
+void KMSSink::requestComplete([[maybe_unused]] DRM::AtomicRequest *request)
{
std::lock_guard<std::mutex> lock(lock_);
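A worked example of composition option 1 (full-screen scaling with the aspect
ratio preserved), using illustrative numbers that are not part of the patch:
a 1280x960 buffer on a 1920x1080 mode is letterboxed to 1440x1080 and centred
horizontally.

	#include <libcamera/geometry.h>

	/* Sketch only, assuming a 1280x960 buffer and a 1920x1080 mode. */
	const libcamera::Rectangle framebuffer{ libcamera::Size(1280, 960) };
	const libcamera::Rectangle display{ 0, 0, 1920, 1080 };

	libcamera::Rectangle dst =
		display.size().boundedToAspectRatio(framebuffer.size())
			      .centeredTo(display.center());
	/* dst is now (240, 0)/1440x1080: the largest 4:3 rectangle on the display. */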
diff --git a/src/cam/kms_sink.h b/src/apps/cam/kms_sink.h
index 4a0a872c..4b7b4c26 100644
--- a/src/cam/kms_sink.h
+++ b/src/apps/cam/kms_sink.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Ideas on Board Oy
*
- * kms_sink.h - KMS Sink
+ * KMS Sink
*/
#pragma once
@@ -10,6 +10,7 @@
#include <list>
#include <memory>
#include <mutex>
+#include <optional>
#include <string>
#include <utility>
@@ -38,8 +39,9 @@ private:
class Request
{
public:
- Request(DRM::AtomicRequest *drmRequest, libcamera::Request *camRequest)
- : drmRequest_(drmRequest), camRequest_(camRequest)
+ Request(std::unique_ptr<DRM::AtomicRequest> drmRequest,
+ libcamera::Request *camRequest)
+ : drmRequest_(std::move(drmRequest)), camRequest_(camRequest)
{
}
@@ -49,6 +51,11 @@ private:
int selectPipeline(const libcamera::PixelFormat &format);
int configurePipeline(const libcamera::PixelFormat &format);
+ bool testModeSet(DRM::FrameBuffer *drmBuffer,
+ const libcamera::Rectangle &src,
+ const libcamera::Rectangle &dst);
+ bool setupComposition(DRM::FrameBuffer *drmBuffer);
+
void requestComplete(DRM::AtomicRequest *request);
DRM::Device dev_;
@@ -61,6 +68,11 @@ private:
libcamera::PixelFormat format_;
libcamera::Size size_;
unsigned int stride_;
+ std::optional<unsigned int> colorEncoding_;
+ std::optional<unsigned int> colorRange_;
+
+ libcamera::Rectangle src_;
+ libcamera::Rectangle dst_;
std::map<libcamera::FrameBuffer *, std::unique_ptr<DRM::FrameBuffer>> buffers_;
diff --git a/src/cam/main.cpp b/src/apps/cam/main.cpp
index 79875ed7..4f87f200 100644
--- a/src/cam/main.cpp
+++ b/src/apps/cam/main.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * main.cpp - cam - The libcamera swiss army knife
+ * cam - The libcamera swiss army knife
*/
#include <atomic>
@@ -14,11 +14,12 @@
#include <libcamera/libcamera.h>
#include <libcamera/property_ids.h>
+#include "../common/event_loop.h"
+#include "../common/options.h"
+#include "../common/stream_options.h"
+
#include "camera_session.h"
-#include "event_loop.h"
#include "main.h"
-#include "options.h"
-#include "stream_options.h"
using namespace libcamera;
@@ -132,6 +133,11 @@ int CamApp::parseOptions(int argc, char *argv[])
"Capture until interrupted by user or until <count> frames captured",
"capture", ArgumentOptional, "count", false,
OptCamera);
+
+ parser.addOption(OptOrientation, OptionString,
+ "Desired image orientation (rot0, rot180, mirror, flip)",
+ "orientation", ArgumentRequired, "orientation", false,
+ OptCamera);
#ifdef HAVE_KMS
parser.addOption(OptDisplay, OptionString,
"Display viewfinder through DRM/KMS on specified connector",
@@ -144,6 +150,12 @@ int CamApp::parseOptions(int argc, char *argv[])
"to write files, using the default file name. Otherwise it sets the\n"
"full file path and name. The first '#' character in the file name\n"
"is expanded to the camera index, stream name and frame sequence number.\n"
+#ifdef HAVE_TIFF
+ "If the file name ends with '.dng', then the frame will be written to\n"
+ "the output file(s) in DNG format.\n"
+#endif
+ "If the file name ends with '.ppm', then the frame will be written to\n"
+ "the output file(s) in PPM format.\n"
"The default file name is 'frame-#.bin'.",
"file", ArgumentOptional, "filename", false,
OptCamera);
@@ -300,8 +312,9 @@ std::string CamApp::cameraName(const Camera *camera)
* Construct the name from the camera location, model and ID. The model
* is only used if the location isn't present or is set to External.
*/
- if (props.contains(properties::Location)) {
- switch (props.get(properties::Location)) {
+ const auto &location = props.get(properties::Location);
+ if (location) {
+ switch (*location) {
case properties::CameraLocationFront:
addModel = false;
name = "Internal front camera ";
@@ -316,12 +329,14 @@ std::string CamApp::cameraName(const Camera *camera)
}
}
- if (addModel && props.contains(properties::Model)) {
+ if (addModel) {
/*
* If the camera location is not available, use the camera model
* to build the camera name.
*/
- name = "'" + props.get(properties::Model) + "' ";
+ const auto &model = props.get(properties::Model);
+ if (model)
+ name = "'" + *model + "' ";
}
name += "(" + camera->id() + ")";
diff --git a/src/cam/main.h b/src/apps/cam/main.h
index 526aecec..64e6a20e 100644
--- a/src/cam/main.h
+++ b/src/apps/cam/main.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * main.h - Cam application
+ * Cam application
*/
#pragma once
@@ -17,6 +17,7 @@ enum {
OptList = 'l',
OptListProperties = 'p',
OptMonitor = 'm',
+ OptOrientation = 'o',
OptSDL = 'S',
OptStream = 's',
OptListControls = 256,
diff --git a/src/cam/meson.build b/src/apps/cam/meson.build
index 5957ce14..c70ca3cd 100644
--- a/src/cam/meson.build
+++ b/src/apps/cam/meson.build
@@ -1,8 +1,6 @@
# SPDX-License-Identifier: CC0-1.0
-libevent = dependency('libevent_pthreads', required : get_option('cam'))
-
-if not libevent.found()
+if opt_cam.disabled() or not libevent.found()
cam_enabled = false
subdir_done()
endif
@@ -12,20 +10,16 @@ cam_enabled = true
cam_sources = files([
'camera_session.cpp',
'capture_script.cpp',
- 'event_loop.cpp',
'file_sink.cpp',
'frame_sink.cpp',
- 'image.cpp',
'main.cpp',
- 'options.cpp',
- 'stream_options.cpp',
])
-cam_cpp_args = []
+cam_cpp_args = [apps_cpp_args]
libdrm = dependency('libdrm', required : false)
+libjpeg = dependency('libjpeg', required : false)
libsdl2 = dependency('SDL2', required : false)
-libsdl2_image = dependency('SDL2_image', required : false)
if libdrm.found()
cam_cpp_args += [ '-DHAVE_KMS' ]
@@ -40,11 +34,11 @@ if libsdl2.found()
cam_sources += files([
'sdl_sink.cpp',
'sdl_texture.cpp',
- 'sdl_texture_yuyv.cpp'
+ 'sdl_texture_yuv.cpp',
])
- if libsdl2_image.found()
- cam_cpp_args += ['-DHAVE_SDL_IMAGE']
+ if libjpeg.found()
+ cam_cpp_args += ['-DHAVE_LIBJPEG']
cam_sources += files([
'sdl_texture_mjpg.cpp'
])
@@ -52,14 +46,17 @@ if libsdl2.found()
endif
cam = executable('cam', cam_sources,
+ link_with : apps_lib,
dependencies : [
libatomic,
libcamera_public,
libdrm,
libevent,
+ libjpeg,
libsdl2,
- libsdl2_image,
+ libtiff,
libyaml,
],
cpp_args : cam_cpp_args,
- install : true)
+ install : true,
+ install_tag : 'bin')
diff --git a/src/cam/sdl_sink.cpp b/src/apps/cam/sdl_sink.cpp
index f8e3e95d..8355dd5e 100644
--- a/src/cam/sdl_sink.cpp
+++ b/src/apps/cam/sdl_sink.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022, Ideas on Board Oy
*
- * sdl_sink.h - SDL Sink
+ * SDL Sink
*/
#include "sdl_sink.h"
@@ -19,12 +19,13 @@
#include <libcamera/camera.h>
#include <libcamera/formats.h>
-#include "event_loop.h"
-#include "image.h"
-#ifdef HAVE_SDL_IMAGE
+#include "../common/event_loop.h"
+#include "../common/image.h"
+
+#ifdef HAVE_LIBJPEG
#include "sdl_texture_mjpg.h"
#endif
-#include "sdl_texture_yuyv.h"
+#include "sdl_texture_yuv.h"
using namespace libcamera;
@@ -62,13 +63,18 @@ int SDLSink::configure(const libcamera::CameraConfiguration &config)
rect_.h = cfg.size.height;
switch (cfg.pixelFormat) {
-#ifdef HAVE_SDL_IMAGE
+#ifdef HAVE_LIBJPEG
case libcamera::formats::MJPEG:
texture_ = std::make_unique<SDLTextureMJPG>(rect_);
break;
#endif
+#if SDL_VERSION_ATLEAST(2, 0, 16)
+ case libcamera::formats::NV12:
+ texture_ = std::make_unique<SDLTextureNV12>(rect_, cfg.stride);
+ break;
+#endif
case libcamera::formats::YUYV:
- texture_ = std::make_unique<SDLTextureYUYV>(rect_);
+ texture_ = std::make_unique<SDLTextureYUYV>(rect_, cfg.stride);
break;
default:
std::cerr << "Unsupported pixel format "
@@ -185,16 +191,23 @@ void SDLSink::renderBuffer(FrameBuffer *buffer)
{
Image *image = mappedBuffers_[buffer].get();
- /* \todo Implement support for multi-planar formats. */
- const FrameMetadata::Plane &meta = buffer->metadata().planes()[0];
+ std::vector<Span<const uint8_t>> planes;
+ unsigned int i = 0;
- Span<uint8_t> data = image->data(0);
- if (meta.bytesused > data.size())
- std::cerr << "payload size " << meta.bytesused
- << " larger than plane size " << data.size()
- << std::endl;
+ planes.reserve(buffer->metadata().planes().size());
+
+ for (const FrameMetadata::Plane &meta : buffer->metadata().planes()) {
+ Span<uint8_t> data = image->data(i);
+ if (meta.bytesused > data.size())
+ std::cerr << "payload size " << meta.bytesused
+ << " larger than plane size " << data.size()
+ << std::endl;
+
+ planes.push_back(data);
+ i++;
+ }
- texture_->update(data);
+ texture_->update(planes);
SDL_RenderClear(renderer_);
SDL_RenderCopy(renderer_, texture_->get(), nullptr, nullptr);
diff --git a/src/cam/sdl_sink.h b/src/apps/cam/sdl_sink.h
index 6c19c663..18ec7fbe 100644
--- a/src/cam/sdl_sink.h
+++ b/src/apps/cam/sdl_sink.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022, Ideas on Board Oy
*
- * sdl_sink.h - SDL Sink
+ * SDL Sink
*/
#pragma once
diff --git a/src/cam/sdl_texture.cpp b/src/apps/cam/sdl_texture.cpp
index 2ca2add2..e52c4a3a 100644
--- a/src/cam/sdl_texture.cpp
+++ b/src/apps/cam/sdl_texture.cpp
@@ -2,16 +2,16 @@
/*
* Copyright (C) 2022, Ideas on Board Oy
*
- * sdl_texture.cpp - SDL Texture
+ * SDL Texture
*/
#include "sdl_texture.h"
#include <iostream>
-SDLTexture::SDLTexture(const SDL_Rect &rect, SDL_PixelFormatEnum pixelFormat,
- const int pitch)
- : ptr_(nullptr), rect_(rect), pixelFormat_(pixelFormat), pitch_(pitch)
+SDLTexture::SDLTexture(const SDL_Rect &rect, uint32_t pixelFormat,
+ const int stride)
+ : ptr_(nullptr), rect_(rect), pixelFormat_(pixelFormat), stride_(stride)
{
}
diff --git a/src/cam/sdl_texture.h b/src/apps/cam/sdl_texture.h
index 90974798..990f83b6 100644
--- a/src/cam/sdl_texture.h
+++ b/src/apps/cam/sdl_texture.h
@@ -2,28 +2,29 @@
/*
* Copyright (C) 2022, Ideas on Board Oy
*
- * sdl_texture.h - SDL Texture
+ * SDL Texture
*/
#pragma once
+#include <vector>
+
#include <SDL2/SDL.h>
-#include "image.h"
+#include "../common/image.h"
class SDLTexture
{
public:
- SDLTexture(const SDL_Rect &rect, SDL_PixelFormatEnum pixelFormat,
- const int pitch);
+ SDLTexture(const SDL_Rect &rect, uint32_t pixelFormat, const int stride);
virtual ~SDLTexture();
int create(SDL_Renderer *renderer);
- virtual void update(const libcamera::Span<uint8_t> &data) = 0;
+ virtual void update(const std::vector<libcamera::Span<const uint8_t>> &data) = 0;
SDL_Texture *get() const { return ptr_; }
protected:
SDL_Texture *ptr_;
const SDL_Rect rect_;
- const SDL_PixelFormatEnum pixelFormat_;
- const int pitch_;
+ const uint32_t pixelFormat_;
+ const int stride_;
};
diff --git a/src/apps/cam/sdl_texture_mjpg.cpp b/src/apps/cam/sdl_texture_mjpg.cpp
new file mode 100644
index 00000000..cace18fc
--- /dev/null
+++ b/src/apps/cam/sdl_texture_mjpg.cpp
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2022, Ideas on Board Oy
+ *
+ * SDL Texture MJPG
+ */
+
+#include "sdl_texture_mjpg.h"
+
+#include <iostream>
+#include <setjmp.h>
+#include <stdio.h>
+
+#include <jpeglib.h>
+
+using namespace libcamera;
+
+struct JpegErrorManager : public jpeg_error_mgr {
+ JpegErrorManager()
+ {
+ jpeg_std_error(this);
+ error_exit = errorExit;
+ output_message = outputMessage;
+ }
+
+ static void errorExit(j_common_ptr cinfo)
+ {
+ JpegErrorManager *self =
+ static_cast<JpegErrorManager *>(cinfo->err);
+ longjmp(self->escape_, 1);
+ }
+
+ static void outputMessage([[maybe_unused]] j_common_ptr cinfo)
+ {
+ }
+
+ jmp_buf escape_;
+};
+
+SDLTextureMJPG::SDLTextureMJPG(const SDL_Rect &rect)
+ : SDLTexture(rect, SDL_PIXELFORMAT_RGB24, rect.w * 3),
+ rgb_(std::make_unique<unsigned char[]>(stride_ * rect.h))
+{
+}
+
+int SDLTextureMJPG::decompress(Span<const uint8_t> data)
+{
+ struct jpeg_decompress_struct cinfo;
+
+ JpegErrorManager errorManager;
+ if (setjmp(errorManager.escape_)) {
+ /* libjpeg found an error */
+ jpeg_destroy_decompress(&cinfo);
+ std::cerr << "JPEG decompression error" << std::endl;
+ return -EINVAL;
+ }
+
+ cinfo.err = &errorManager;
+ jpeg_create_decompress(&cinfo);
+
+ jpeg_mem_src(&cinfo, data.data(), data.size());
+
+ jpeg_read_header(&cinfo, TRUE);
+
+ jpeg_start_decompress(&cinfo);
+
+ for (int i = 0; cinfo.output_scanline < cinfo.output_height; ++i) {
+ JSAMPROW rowptr = rgb_.get() + i * stride_;
+ jpeg_read_scanlines(&cinfo, &rowptr, 1);
+ }
+
+ jpeg_finish_decompress(&cinfo);
+
+ jpeg_destroy_decompress(&cinfo);
+
+ return 0;
+}
+
+void SDLTextureMJPG::update(const std::vector<libcamera::Span<const uint8_t>> &data)
+{
+ decompress(data[0]);
+ SDL_UpdateTexture(ptr_, nullptr, rgb_.get(), stride_);
+}
diff --git a/src/cam/sdl_texture_mjpg.h b/src/apps/cam/sdl_texture_mjpg.h
index b103f801..37bed5f0 100644
--- a/src/cam/sdl_texture_mjpg.h
+++ b/src/apps/cam/sdl_texture_mjpg.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022, Ideas on Board Oy
*
- * sdl_texture_mjpg.h - SDL Texture MJPG
+ * SDL Texture MJPG
*/
#pragma once
@@ -13,5 +13,11 @@ class SDLTextureMJPG : public SDLTexture
{
public:
SDLTextureMJPG(const SDL_Rect &rect);
- void update(const libcamera::Span<uint8_t> &data) override;
+
+ void update(const std::vector<libcamera::Span<const uint8_t>> &data) override;
+
+private:
+ int decompress(libcamera::Span<const uint8_t> data);
+
+ std::unique_ptr<unsigned char[]> rgb_;
};
diff --git a/src/apps/cam/sdl_texture_yuv.cpp b/src/apps/cam/sdl_texture_yuv.cpp
new file mode 100644
index 00000000..480d7a37
--- /dev/null
+++ b/src/apps/cam/sdl_texture_yuv.cpp
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2022, Ideas on Board Oy
+ *
+ * SDL YUV Textures
+ */
+
+#include "sdl_texture_yuv.h"
+
+using namespace libcamera;
+
+#if SDL_VERSION_ATLEAST(2, 0, 16)
+SDLTextureNV12::SDLTextureNV12(const SDL_Rect &rect, unsigned int stride)
+ : SDLTexture(rect, SDL_PIXELFORMAT_NV12, stride)
+{
+}
+
+void SDLTextureNV12::update(const std::vector<libcamera::Span<const uint8_t>> &data)
+{
+ SDL_UpdateNVTexture(ptr_, &rect_, data[0].data(), stride_,
+ data[1].data(), stride_);
+}
+#endif
+
+SDLTextureYUYV::SDLTextureYUYV(const SDL_Rect &rect, unsigned int stride)
+ : SDLTexture(rect, SDL_PIXELFORMAT_YUY2, stride)
+{
+}
+
+void SDLTextureYUYV::update(const std::vector<libcamera::Span<const uint8_t>> &data)
+{
+ SDL_UpdateTexture(ptr_, &rect_, data[0].data(), stride_);
+}
diff --git a/src/apps/cam/sdl_texture_yuv.h b/src/apps/cam/sdl_texture_yuv.h
new file mode 100644
index 00000000..29c756e7
--- /dev/null
+++ b/src/apps/cam/sdl_texture_yuv.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2022, Ideas on Board Oy
+ *
+ * SDL YUV Textures
+ */
+
+#pragma once
+
+#include "sdl_texture.h"
+
+#if SDL_VERSION_ATLEAST(2, 0, 16)
+class SDLTextureNV12 : public SDLTexture
+{
+public:
+ SDLTextureNV12(const SDL_Rect &rect, unsigned int stride);
+ void update(const std::vector<libcamera::Span<const uint8_t>> &data) override;
+};
+#endif
+
+class SDLTextureYUYV : public SDLTexture
+{
+public:
+ SDLTextureYUYV(const SDL_Rect &rect, unsigned int stride);
+ void update(const std::vector<libcamera::Span<const uint8_t>> &data) override;
+};
diff --git a/src/qcam/dng_writer.cpp b/src/apps/common/dng_writer.cpp
index 34c8df5a..59f1fa23 100644
--- a/src/qcam/dng_writer.cpp
+++ b/src/apps/common/dng_writer.cpp
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Ltd.
+ * Copyright (C) 2020, Raspberry Pi Ltd
*
- * dng_writer.cpp - DNG writer
+ * DNG writer
*/
#include "dng_writer.h"
@@ -126,6 +126,14 @@ struct Matrix3d {
float m[9];
};
+void packScanlineSBGGR8(void *output, const void *input, unsigned int width)
+{
+ const uint8_t *in = static_cast<const uint8_t *>(input);
+ uint8_t *out = static_cast<uint8_t *>(output);
+
+ std::copy(in, in + width, out);
+}
+
void packScanlineSBGGR10P(void *output, const void *input, unsigned int width)
{
const uint8_t *in = static_cast<const uint8_t *>(input);
@@ -240,6 +248,7 @@ void thumbScanlineIPU3([[maybe_unused]] const FormatInfo &info, void *output,
uint16_t val1, val2, val3, val4;
switch (pixelInBlock % 4) {
+ default:
case 0:
val1 = (in[1] & 0x03) << 14 | (in[0] & 0xff) << 6;
val2 = (in[2] & 0x0f) << 12 | (in[1] & 0xfc) << 4;
@@ -274,6 +283,30 @@ void thumbScanlineIPU3([[maybe_unused]] const FormatInfo &info, void *output,
}
static const std::map<PixelFormat, FormatInfo> formatInfo = {
+ { formats::SBGGR8, {
+ .bitsPerSample = 8,
+ .pattern = { CFAPatternBlue, CFAPatternGreen, CFAPatternGreen, CFAPatternRed },
+ .packScanline = packScanlineSBGGR8,
+ .thumbScanline = thumbScanlineSBGGRxxP,
+ } },
+ { formats::SGBRG8, {
+ .bitsPerSample = 8,
+ .pattern = { CFAPatternGreen, CFAPatternBlue, CFAPatternRed, CFAPatternGreen },
+ .packScanline = packScanlineSBGGR8,
+ .thumbScanline = thumbScanlineSBGGRxxP,
+ } },
+ { formats::SGRBG8, {
+ .bitsPerSample = 8,
+ .pattern = { CFAPatternGreen, CFAPatternRed, CFAPatternBlue, CFAPatternGreen },
+ .packScanline = packScanlineSBGGR8,
+ .thumbScanline = thumbScanlineSBGGRxxP,
+ } },
+ { formats::SRGGB8, {
+ .bitsPerSample = 8,
+ .pattern = { CFAPatternRed, CFAPatternGreen, CFAPatternGreen, CFAPatternBlue },
+ .packScanline = packScanlineSBGGR8,
+ .thumbScanline = thumbScanlineSBGGRxxP,
+ } },
{ formats::SBGGR10_CSI2P, {
.bitsPerSample = 10,
.pattern = { CFAPatternBlue, CFAPatternGreen, CFAPatternGreen, CFAPatternRed },
@@ -391,9 +424,9 @@ int DNGWriter::write(const char *filename, const Camera *camera,
TIFFSetField(tif, TIFFTAG_FILLORDER, FILLORDER_MSB2LSB);
TIFFSetField(tif, TIFFTAG_MAKE, "libcamera");
- if (cameraProperties.contains(properties::Model)) {
- std::string model = cameraProperties.get(properties::Model);
- TIFFSetField(tif, TIFFTAG_MODEL, model.c_str());
+ const auto &model = cameraProperties.get(properties::Model);
+ if (model) {
+ TIFFSetField(tif, TIFFTAG_MODEL, model->c_str());
/* \todo set TIFFTAG_UNIQUECAMERAMODEL. */
}
@@ -437,17 +470,18 @@ int DNGWriter::write(const char *filename, const Camera *camera,
*/
const double eps = 1e-2;
- if (metadata.contains(controls::ColourGains)) {
- Span<const float> const &colourGains = metadata.get(controls::ColourGains);
- if (colourGains[0] > eps && colourGains[1] > eps) {
- wbGain = Matrix3d::diag(colourGains[0], 1, colourGains[1]);
- neutral[0] = 1.0 / colourGains[0]; /* red */
- neutral[2] = 1.0 / colourGains[1]; /* blue */
+ const auto &colourGains = metadata.get(controls::ColourGains);
+ if (colourGains) {
+ if ((*colourGains)[0] > eps && (*colourGains)[1] > eps) {
+ wbGain = Matrix3d::diag((*colourGains)[0], 1, (*colourGains)[1]);
+ neutral[0] = 1.0 / (*colourGains)[0]; /* red */
+ neutral[2] = 1.0 / (*colourGains)[1]; /* blue */
}
}
- if (metadata.contains(controls::ColourCorrectionMatrix)) {
- Span<const float> const &coeffs = metadata.get(controls::ColourCorrectionMatrix);
- Matrix3d ccmSupplied(coeffs);
+
+ const auto &ccmControl = metadata.get(controls::ColourCorrectionMatrix);
+ if (ccmControl) {
+ Matrix3d ccmSupplied(*ccmControl);
if (ccmSupplied.determinant() > eps)
ccm = ccmSupplied;
}
@@ -506,7 +540,10 @@ int DNGWriter::write(const char *filename, const Camera *camera,
TIFFSetField(tif, TIFFTAG_PLANARCONFIG, PLANARCONFIG_CONTIG);
TIFFSetField(tif, TIFFTAG_SAMPLEFORMAT, SAMPLEFORMAT_UINT);
TIFFSetField(tif, TIFFTAG_CFAREPEATPATTERNDIM, cfaRepeatPatternDim);
- TIFFSetField(tif, TIFFTAG_CFAPATTERN, info->pattern);
+ if (TIFFLIB_VERSION < 20201219)
+ TIFFSetField(tif, TIFFTAG_CFAPATTERN, info->pattern);
+ else
+ TIFFSetField(tif, TIFFTAG_CFAPATTERN, 4, info->pattern);
TIFFSetField(tif, TIFFTAG_CFAPLANECOLOR, 3, cfaPlaneColor);
TIFFSetField(tif, TIFFTAG_CFALAYOUT, 1);
@@ -514,8 +551,9 @@ int DNGWriter::write(const char *filename, const Camera *camera,
float blackLevel[] = { 0.0f, 0.0f, 0.0f, 0.0f };
uint32_t whiteLevel = (1 << info->bitsPerSample) - 1;
- if (metadata.contains(controls::SensorBlackLevels)) {
- Span<const int32_t> levels = metadata.get(controls::SensorBlackLevels);
+ const auto &blackLevels = metadata.get(controls::SensorBlackLevels);
+ if (blackLevels) {
+ Span<const int32_t, 4> levels = *blackLevels;
/*
* The black levels control is specified in R, Gr, Gb, B order.
@@ -592,16 +630,15 @@ int DNGWriter::write(const char *filename, const Camera *camera,
TIFFSetField(tif, EXIFTAG_DATETIMEORIGINAL, strTime);
TIFFSetField(tif, EXIFTAG_DATETIMEDIGITIZED, strTime);
- if (metadata.contains(controls::AnalogueGain)) {
- float gain = metadata.get(controls::AnalogueGain);
- uint16_t iso = std::min(std::max(gain * 100, 0.0f), 65535.0f);
+ const auto &analogGain = metadata.get(controls::AnalogueGain);
+ if (analogGain) {
+ uint16_t iso = std::min(std::max(*analogGain * 100, 0.0f), 65535.0f);
TIFFSetField(tif, EXIFTAG_ISOSPEEDRATINGS, 1, &iso);
}
- if (metadata.contains(controls::ExposureTime)) {
- float exposureTime = metadata.get(controls::ExposureTime) / 1e6;
- TIFFSetField(tif, EXIFTAG_EXPOSURETIME, exposureTime);
- }
+ const auto &exposureTime = metadata.get(controls::ExposureTime);
+ if (exposureTime)
+ TIFFSetField(tif, EXIFTAG_EXPOSURETIME, *exposureTime / 1e6);
TIFFWriteCustomDirectory(tif, &exifIFDOffset);
diff --git a/src/qcam/dng_writer.h b/src/apps/common/dng_writer.h
index c044bf8b..917713e6 100644
--- a/src/qcam/dng_writer.h
+++ b/src/apps/common/dng_writer.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Ltd.
+ * Copyright (C) 2020, Raspberry Pi Ltd
*
- * dng_writer.h - DNG writer
+ * DNG writer
*/
#pragma once
diff --git a/src/cam/event_loop.cpp b/src/apps/common/event_loop.cpp
index cb83845c..f7f9afa0 100644
--- a/src/cam/event_loop.cpp
+++ b/src/apps/common/event_loop.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * event_loop.cpp - cam - Event loop
+ * cam - Event loop
*/
#include "event_loop.h"
diff --git a/src/cam/event_loop.h b/src/apps/common/event_loop.h
index ef79e8e5..ef129b9a 100644
--- a/src/cam/event_loop.h
+++ b/src/apps/common/event_loop.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * event_loop.h - cam - Event loop
+ * cam - Event loop
*/
#pragma once
diff --git a/src/cam/image.cpp b/src/apps/common/image.cpp
index fe2cc6da..a2a0f58f 100644
--- a/src/cam/image.cpp
+++ b/src/apps/common/image.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Ideas on Board Oy
*
- * image.cpp - Multi-planar image with access to pixel data
+ * Multi-planar image with access to pixel data
*/
#include "image.h"
diff --git a/src/cam/image.h b/src/apps/common/image.h
index 7953b177..e47e446b 100644
--- a/src/cam/image.h
+++ b/src/apps/common/image.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Ideas on Board Oy
*
- * image.h - Multi-planar image with access to pixel data
+ * Multi-planar image with access to pixel data
*/
#pragma once
diff --git a/src/apps/common/meson.build b/src/apps/common/meson.build
new file mode 100644
index 00000000..5b683390
--- /dev/null
+++ b/src/apps/common/meson.build
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: CC0-1.0
+
+apps_sources = files([
+ 'image.cpp',
+ 'options.cpp',
+ 'ppm_writer.cpp',
+ 'stream_options.cpp',
+])
+
+apps_cpp_args = []
+
+if libevent.found()
+ apps_sources += files([
+ 'event_loop.cpp',
+ ])
+endif
+
+if libtiff.found()
+ apps_cpp_args += ['-DHAVE_TIFF']
+ apps_sources += files([
+ 'dng_writer.cpp',
+ ])
+endif
+
+apps_lib = static_library('apps', apps_sources,
+ cpp_args : apps_cpp_args,
+ dependencies : [libcamera_public])
diff --git a/src/cam/options.cpp b/src/apps/common/options.cpp
index 4f7e8691..ab19aa3d 100644
--- a/src/cam/options.cpp
+++ b/src/apps/common/options.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * options.cpp - cam - Options parsing
+ * cam - Options parsing
*/
#include <assert.h>
diff --git a/src/cam/options.h b/src/apps/common/options.h
index 4ddd4987..9771aa7a 100644
--- a/src/cam/options.h
+++ b/src/apps/common/options.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * options.h - cam - Options parsing
+ * cam - Options parsing
*/
#pragma once
diff --git a/src/apps/common/ppm_writer.cpp b/src/apps/common/ppm_writer.cpp
new file mode 100644
index 00000000..d6c8641d
--- /dev/null
+++ b/src/apps/common/ppm_writer.cpp
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024 Red Hat, Inc.
+ *
+ * PPM writer
+ */
+
+#include "ppm_writer.h"
+
+#include <fstream>
+#include <iostream>
+
+#include <libcamera/formats.h>
+#include <libcamera/pixel_format.h>
+
+using namespace libcamera;
+
+int PPMWriter::write(const char *filename,
+ const StreamConfiguration &config,
+ const Span<uint8_t> &data)
+{
+ if (config.pixelFormat != formats::BGR888) {
+ std::cerr << "Only BGR888 output pixel format is supported ("
+ << config.pixelFormat << " requested)" << std::endl;
+ return -EINVAL;
+ }
+
+ std::ofstream output(filename, std::ios::binary);
+ if (!output) {
+ std::cerr << "Failed to open ppm file: " << filename << std::endl;
+ return -EINVAL;
+ }
+
+ output << "P6" << std::endl
+ << config.size.width << " " << config.size.height << std::endl
+ << "255" << std::endl;
+ if (!output) {
+ std::cerr << "Failed to write the file header" << std::endl;
+ return -EINVAL;
+ }
+
+ const unsigned int rowLength = config.size.width * 3;
+ const char *row = reinterpret_cast<const char *>(data.data());
+ for (unsigned int y = 0; y < config.size.height; y++, row += config.stride) {
+ output.write(row, rowLength);
+ if (!output) {
+ std::cerr << "Failed to write image data at row " << y << std::endl;
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
diff --git a/src/apps/common/ppm_writer.h b/src/apps/common/ppm_writer.h
new file mode 100644
index 00000000..8c8d2e15
--- /dev/null
+++ b/src/apps/common/ppm_writer.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Red Hat, Inc.
+ *
+ * PPM writer
+ */
+
+#pragma once
+
+#include <libcamera/base/span.h>
+
+#include <libcamera/stream.h>
+
+class PPMWriter
+{
+public:
+ static int write(const char *filename,
+ const libcamera::StreamConfiguration &config,
+ const libcamera::Span<uint8_t> &data);
+};
diff --git a/src/cam/stream_options.cpp b/src/apps/common/stream_options.cpp
index 150bd27c..99239e07 100644
--- a/src/cam/stream_options.cpp
+++ b/src/apps/common/stream_options.cpp
@@ -1,13 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Ltd.
+ * Copyright (C) 2020, Raspberry Pi Ltd
*
- * stream_options.cpp - Helper to parse options for streams
+ * Helper to parse options for streams
*/
#include "stream_options.h"
#include <iostream>
+#include <libcamera/color_space.h>
+
using namespace libcamera;
StreamKeyValueParser::StreamKeyValueParser()
@@ -21,15 +23,15 @@ StreamKeyValueParser::StreamKeyValueParser()
ArgumentRequired);
addOption("pixelformat", OptionString, "Pixel format name",
ArgumentRequired);
+ addOption("colorspace", OptionString, "Color space",
+ ArgumentRequired);
}
KeyValueParser::Options StreamKeyValueParser::parse(const char *arguments)
{
KeyValueParser::Options options = KeyValueParser::parse(arguments);
- StreamRole role;
- if (options.valid() && options.isSet("role") &&
- !parseRole(&role, options)) {
+ if (options.valid() && options.isSet("role") && !parseRole(options)) {
std::cerr << "Unknown stream role "
<< options["role"].toString() << std::endl;
options.invalidate();
@@ -38,7 +40,7 @@ KeyValueParser::Options StreamKeyValueParser::parse(const char *arguments)
return options;
}
-StreamRoles StreamKeyValueParser::roles(const OptionValue &values)
+std::vector<StreamRole> StreamKeyValueParser::roles(const OptionValue &values)
{
/* If no configuration values to examine default to viewfinder. */
if (values.empty())
@@ -46,15 +48,10 @@ StreamRoles StreamKeyValueParser::roles(const OptionValue &values)
const std::vector<OptionValue> &streamParameters = values.toArray();
- StreamRoles roles;
+ std::vector<StreamRole> roles;
for (auto const &value : streamParameters) {
- StreamRole role;
-
- /* If role is invalid or not set default to viewfinder. */
- if (!parseRole(&role, value.toKeyValues()))
- role = StreamRole::Viewfinder;
-
- roles.push_back(role);
+ /* If a role is invalid default it to viewfinder. */
+ roles.push_back(parseRole(value.toKeyValues()).value_or(StreamRole::Viewfinder));
}
return roles;
@@ -96,32 +93,29 @@ int StreamKeyValueParser::updateConfiguration(CameraConfiguration *config,
if (opts.isSet("pixelformat"))
cfg.pixelFormat = PixelFormat::fromString(opts["pixelformat"].toString());
+
+ if (opts.isSet("colorspace"))
+ cfg.colorSpace = ColorSpace::fromString(opts["colorspace"].toString());
}
return 0;
}
-bool StreamKeyValueParser::parseRole(StreamRole *role,
- const KeyValueParser::Options &options)
+std::optional<libcamera::StreamRole> StreamKeyValueParser::parseRole(const KeyValueParser::Options &options)
{
if (!options.isSet("role"))
- return false;
+ return {};
std::string name = options["role"].toString();
- if (name == "viewfinder") {
- *role = StreamRole::Viewfinder;
- return true;
- } else if (name == "video") {
- *role = StreamRole::VideoRecording;
- return true;
- } else if (name == "still") {
- *role = StreamRole::StillCapture;
- return true;
- } else if (name == "raw") {
- *role = StreamRole::Raw;
- return true;
- }
+ if (name == "viewfinder")
+ return StreamRole::Viewfinder;
+ else if (name == "video")
+ return StreamRole::VideoRecording;
+ else if (name == "still")
+ return StreamRole::StillCapture;
+ else if (name == "raw")
+ return StreamRole::Raw;
- return false;
+ return {};
}
diff --git a/src/cam/stream_options.h b/src/apps/common/stream_options.h
index d235b77f..a93f104c 100644
--- a/src/cam/stream_options.h
+++ b/src/apps/common/stream_options.h
@@ -1,12 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Ltd.
+ * Copyright (C) 2020, Raspberry Pi Ltd
*
- * stream_options.h - Helper to parse options for streams
+ * Helper to parse options for streams
*/
#pragma once
+#include <optional>
+
#include <libcamera/camera.h>
#include "options.h"
@@ -18,11 +20,10 @@ public:
KeyValueParser::Options parse(const char *arguments) override;
- static libcamera::StreamRoles roles(const OptionValue &values);
+ static std::vector<libcamera::StreamRole> roles(const OptionValue &values);
static int updateConfiguration(libcamera::CameraConfiguration *config,
const OptionValue &values);
private:
- static bool parseRole(libcamera::StreamRole *role,
- const KeyValueParser::Options &options);
+ static std::optional<libcamera::StreamRole> parseRole(const KeyValueParser::Options &options);
};
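With the colorspace key added above, a stream specification can now also select a colour space, for example role=viewfinder,pixelformat=BGR888,colorspace=sRGB (any name understood by ColorSpace::fromString() works; sRGB is only an assumed example). A sketch of how a caller consumes the reworked parser, with camera and streamOptions assumed to come from the application's option handling:

#include <errno.h>
#include <memory>
#include <vector>

#include <libcamera/camera.h>

#include "stream_options.h"

int configureFromOptions(libcamera::Camera *camera, const OptionValue &streamOptions)
{
	/* Roles default to Viewfinder when unset or invalid. */
	std::vector<libcamera::StreamRole> roles =
		StreamKeyValueParser::roles(streamOptions);

	std::unique_ptr<libcamera::CameraConfiguration> config =
		camera->generateConfiguration(roles);
	if (!config)
		return -EINVAL;

	/* Applies the size, pixelformat and colorspace keys to each stream. */
	if (StreamKeyValueParser::updateConfiguration(config.get(), streamOptions))
		return -EINVAL;

	return camera->configure(config.get());
}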
diff --git a/src/apps/ipa-verify/main.cpp b/src/apps/ipa-verify/main.cpp
new file mode 100644
index 00000000..0903cd85
--- /dev/null
+++ b/src/apps/ipa-verify/main.cpp
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2023, Ideas on Board Oy
+ *
+ * Verify signature on an IPA module
+ */
+
+#include <iostream>
+#include <libgen.h>
+
+#include <libcamera/base/file.h>
+#include <libcamera/base/span.h>
+
+#include "libcamera/internal/ipa_manager.h"
+#include "libcamera/internal/ipa_module.h"
+
+using namespace libcamera;
+
+namespace {
+
+bool isSignatureValid(IPAModule *ipa)
+{
+ File file{ ipa->path() };
+ if (!file.open(File::OpenModeFlag::ReadOnly))
+ return false;
+
+ Span<uint8_t> data = file.map();
+ if (data.empty())
+ return false;
+
+ return IPAManager::pubKey().verify(data, ipa->signature());
+}
+
+void usage(char *argv0)
+{
+ std::cout << "Usage: " << basename(argv0) << " ipa_name.so" << std::endl;
+ std::cout << std::endl;
+ std::cout << "Verify the signature of an IPA module. The signature file ipa_name.so.sign is" << std::endl;
+ std::cout << "expected to be in the same directory as the IPA module." << std::endl;
+}
+
+} /* namespace */
+
+int main(int argc, char **argv)
+{
+ if (argc != 2) {
+ usage(argv[0]);
+ return EXIT_FAILURE;
+ }
+
+ IPAModule module{ argv[1] };
+ if (!module.isValid()) {
+ std::cout << "Invalid IPA module " << argv[1] << std::endl;
+ return EXIT_FAILURE;
+ }
+
+ if (!isSignatureValid(&module)) {
+ std::cout << "IPA module signature is invalid" << std::endl;
+ return EXIT_FAILURE;
+ }
+
+ std::cout << "IPA module signature is valid" << std::endl;
+ return 0;
+}
diff --git a/src/apps/ipa-verify/meson.build b/src/apps/ipa-verify/meson.build
new file mode 100644
index 00000000..7fdda3b9
--- /dev/null
+++ b/src/apps/ipa-verify/meson.build
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: CC0-1.0
+
+if not ipa_sign_module
+ subdir_done()
+endif
+
+ipa_verify_sources = files([
+ 'main.cpp',
+])
+
+ipa_verify = executable('ipa_verify', ipa_verify_sources,
+ dependencies : [
+ libcamera_private,
+ ],
+ install : false)
diff --git a/src/lc-compliance/environment.cpp b/src/apps/lc-compliance/environment.cpp
index 5eb3775f..987264f1 100644
--- a/src/lc-compliance/environment.cpp
+++ b/src/apps/lc-compliance/environment.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Collabora Ltd.
*
- * environment.cpp - Common environment for tests
+ * Common environment for tests
*/
#include "environment.h"
diff --git a/src/lc-compliance/environment.h b/src/apps/lc-compliance/environment.h
index 0debbcce..543e5372 100644
--- a/src/lc-compliance/environment.h
+++ b/src/apps/lc-compliance/environment.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Collabora Ltd.
*
- * environment.h - Common environment for tests
+ * Common environment for tests
*/
#pragma once
diff --git a/src/lc-compliance/simple_capture.cpp b/src/apps/lc-compliance/helpers/capture.cpp
index ab5cb35c..90c1530b 100644
--- a/src/lc-compliance/simple_capture.cpp
+++ b/src/apps/lc-compliance/helpers/capture.cpp
@@ -2,27 +2,27 @@
/*
* Copyright (C) 2020-2021, Google Inc.
*
- * simple_capture.cpp - Simple capture helper
+ * Simple capture helper
*/
-#include <gtest/gtest.h>
+#include "capture.h"
-#include "simple_capture.h"
+#include <gtest/gtest.h>
using namespace libcamera;
-SimpleCapture::SimpleCapture(std::shared_ptr<Camera> camera)
+Capture::Capture(std::shared_ptr<Camera> camera)
: loop_(nullptr), camera_(camera),
allocator_(std::make_unique<FrameBufferAllocator>(camera))
{
}
-SimpleCapture::~SimpleCapture()
+Capture::~Capture()
{
stop();
}
-void SimpleCapture::configure(StreamRole role)
+void Capture::configure(StreamRole role)
{
config_ = camera_->generateConfiguration({ role });
@@ -42,7 +42,7 @@ void SimpleCapture::configure(StreamRole role)
}
}
-void SimpleCapture::start()
+void Capture::start()
{
Stream *stream = config_->at(0).stream();
int count = allocator_->allocate(stream);
@@ -50,12 +50,12 @@ void SimpleCapture::start()
ASSERT_GE(count, 0) << "Failed to allocate buffers";
EXPECT_EQ(count, config_->at(0).bufferCount) << "Allocated less buffers than expected";
- camera_->requestCompleted.connect(this, &SimpleCapture::requestComplete);
+ camera_->requestCompleted.connect(this, &Capture::requestComplete);
ASSERT_EQ(camera_->start(), 0) << "Failed to start camera";
}
-void SimpleCapture::stop()
+void Capture::stop()
{
if (!config_ || !allocator_->allocated())
return;
@@ -65,17 +65,18 @@ void SimpleCapture::stop()
camera_->requestCompleted.disconnect(this);
Stream *stream = config_->at(0).stream();
+ requests_.clear();
allocator_->free(stream);
}
-/* SimpleCaptureBalanced */
+/* CaptureBalanced */
-SimpleCaptureBalanced::SimpleCaptureBalanced(std::shared_ptr<Camera> camera)
- : SimpleCapture(camera)
+CaptureBalanced::CaptureBalanced(std::shared_ptr<Camera> camera)
+ : Capture(camera)
{
}
-void SimpleCaptureBalanced::capture(unsigned int numRequests)
+void CaptureBalanced::capture(unsigned int numRequests)
{
start();
@@ -94,8 +95,7 @@ void SimpleCaptureBalanced::capture(unsigned int numRequests)
captureCount_ = 0;
captureLimit_ = numRequests;
- /* Queue the recommended number of reqeuests. */
- std::vector<std::unique_ptr<libcamera::Request>> requests;
+ /* Queue the recommended number of requests. */
for (const std::unique_ptr<FrameBuffer> &buffer : buffers) {
std::unique_ptr<Request> request = camera_->createRequest();
ASSERT_TRUE(request) << "Can't create request";
@@ -104,7 +104,7 @@ void SimpleCaptureBalanced::capture(unsigned int numRequests)
ASSERT_EQ(queueRequest(request.get()), 0) << "Failed to queue request";
- requests.push_back(std::move(request));
+ requests_.push_back(std::move(request));
}
/* Run capture session. */
@@ -116,7 +116,7 @@ void SimpleCaptureBalanced::capture(unsigned int numRequests)
ASSERT_EQ(captureCount_, captureLimit_);
}
-int SimpleCaptureBalanced::queueRequest(Request *request)
+int CaptureBalanced::queueRequest(Request *request)
{
queueCount_++;
if (queueCount_ > captureLimit_)
@@ -125,8 +125,11 @@ int SimpleCaptureBalanced::queueRequest(Request *request)
return camera_->queueRequest(request);
}
-void SimpleCaptureBalanced::requestComplete(Request *request)
+void CaptureBalanced::requestComplete(Request *request)
{
+ EXPECT_EQ(request->status(), Request::Status::RequestComplete)
+ << "Request didn't complete successfully";
+
captureCount_++;
if (captureCount_ >= captureLimit_) {
loop_->exit(0);
@@ -138,14 +141,14 @@ void SimpleCaptureBalanced::requestComplete(Request *request)
loop_->exit(-EINVAL);
}
-/* SimpleCaptureUnbalanced */
+/* CaptureUnbalanced */
-SimpleCaptureUnbalanced::SimpleCaptureUnbalanced(std::shared_ptr<Camera> camera)
- : SimpleCapture(camera)
+CaptureUnbalanced::CaptureUnbalanced(std::shared_ptr<Camera> camera)
+ : Capture(camera)
{
}
-void SimpleCaptureUnbalanced::capture(unsigned int numRequests)
+void CaptureUnbalanced::capture(unsigned int numRequests)
{
start();
@@ -155,8 +158,7 @@ void SimpleCaptureUnbalanced::capture(unsigned int numRequests)
captureCount_ = 0;
captureLimit_ = numRequests;
- /* Queue the recommended number of reqeuests. */
- std::vector<std::unique_ptr<libcamera::Request>> requests;
+ /* Queue the recommended number of requests. */
for (const std::unique_ptr<FrameBuffer> &buffer : buffers) {
std::unique_ptr<Request> request = camera_->createRequest();
ASSERT_TRUE(request) << "Can't create request";
@@ -165,7 +167,7 @@ void SimpleCaptureUnbalanced::capture(unsigned int numRequests)
ASSERT_EQ(camera_->queueRequest(request.get()), 0) << "Failed to queue request";
- requests.push_back(std::move(request));
+ requests_.push_back(std::move(request));
}
/* Run capture session. */
@@ -177,7 +179,7 @@ void SimpleCaptureUnbalanced::capture(unsigned int numRequests)
ASSERT_EQ(status, 0);
}
-void SimpleCaptureUnbalanced::requestComplete(Request *request)
+void CaptureUnbalanced::requestComplete(Request *request)
{
captureCount_++;
if (captureCount_ >= captureLimit_) {
@@ -185,6 +187,9 @@ void SimpleCaptureUnbalanced::requestComplete(Request *request)
return;
}
+ EXPECT_EQ(request->status(), Request::Status::RequestComplete)
+ << "Request didn't complete successfully";
+
request->reuse(Request::ReuseBuffers);
if (camera_->queueRequest(request))
loop_->exit(-EINVAL);
diff --git a/src/lc-compliance/simple_capture.h b/src/apps/lc-compliance/helpers/capture.h
index 9d31f7cb..19b6927c 100644
--- a/src/lc-compliance/simple_capture.h
+++ b/src/apps/lc-compliance/helpers/capture.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020-2021, Google Inc.
*
- * simple_capture.h - Simple capture helper
+ * Simple capture helper
*/
#pragma once
@@ -11,16 +11,16 @@
#include <libcamera/libcamera.h>
-#include "../cam/event_loop.h"
+#include "../common/event_loop.h"
-class SimpleCapture
+class Capture
{
public:
void configure(libcamera::StreamRole role);
protected:
- SimpleCapture(std::shared_ptr<libcamera::Camera> camera);
- virtual ~SimpleCapture();
+ Capture(std::shared_ptr<libcamera::Camera> camera);
+ virtual ~Capture();
void start();
void stop();
@@ -32,12 +32,13 @@ protected:
std::shared_ptr<libcamera::Camera> camera_;
std::unique_ptr<libcamera::FrameBufferAllocator> allocator_;
std::unique_ptr<libcamera::CameraConfiguration> config_;
+ std::vector<std::unique_ptr<libcamera::Request>> requests_;
};
-class SimpleCaptureBalanced : public SimpleCapture
+class CaptureBalanced : public Capture
{
public:
- SimpleCaptureBalanced(std::shared_ptr<libcamera::Camera> camera);
+ CaptureBalanced(std::shared_ptr<libcamera::Camera> camera);
void capture(unsigned int numRequests);
@@ -50,10 +51,10 @@ private:
unsigned int captureLimit_;
};
-class SimpleCaptureUnbalanced : public SimpleCapture
+class CaptureUnbalanced : public Capture
{
public:
- SimpleCaptureUnbalanced(std::shared_ptr<libcamera::Camera> camera);
+ CaptureUnbalanced(std::shared_ptr<libcamera::Camera> camera);
void capture(unsigned int numRequests);
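Apart from the rename, the helpers keep the same usage pattern: construct the balanced or unbalanced variant for the camera under test, configure a role, then capture a number of requests. A short sketch, assuming camera_ and numRequests come from the test fixture:

/* Exactly numRequests requests are queued and must all complete. */
CaptureBalanced balanced(camera_);
balanced.configure(libcamera::StreamRole::VideoRecording);
balanced.capture(numRequests);

/*
 * Requests are requeued on completion, so the session ends up being
 * stopped with requests still in flight.
 */
CaptureUnbalanced unbalanced(camera_);
unbalanced.configure(libcamera::StreamRole::Viewfinder);
unbalanced.capture(numRequests);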
diff --git a/src/lc-compliance/main.cpp b/src/apps/lc-compliance/main.cpp
index 7eb52ae4..3f1d2a61 100644
--- a/src/lc-compliance/main.cpp
+++ b/src/apps/lc-compliance/main.cpp
@@ -3,7 +3,7 @@
* Copyright (C) 2020, Google Inc.
* Copyright (C) 2021, Collabora Ltd.
*
- * main.cpp - lc-compliance - The libcamera compliance tool
+ * lc-compliance - The libcamera compliance tool
*/
#include <iomanip>
@@ -14,8 +14,9 @@
#include <libcamera/libcamera.h>
+#include "../common/options.h"
+
#include "environment.h"
-#include "../cam/options.h"
using namespace libcamera;
diff --git a/src/lc-compliance/meson.build b/src/apps/lc-compliance/meson.build
index 8b57474b..b1f605f3 100644
--- a/src/lc-compliance/meson.build
+++ b/src/apps/lc-compliance/meson.build
@@ -1,10 +1,10 @@
# SPDX-License-Identifier: CC0-1.0
-libevent = dependency('libevent_pthreads', required : get_option('lc-compliance'))
-libgtest = dependency('gtest', required : get_option('lc-compliance'),
+libgtest = dependency('gtest', version : '>=1.10.0',
+ required : get_option('lc-compliance'),
fallback : ['gtest', 'gtest_dep'])
-if not (libevent.found() and libgtest.found())
+if opt_lc_compliance.disabled() or not libevent.found() or not libgtest.found()
lc_compliance_enabled = false
subdir_done()
endif
@@ -12,20 +12,26 @@ endif
lc_compliance_enabled = true
lc_compliance_sources = files([
- '../cam/event_loop.cpp',
- '../cam/options.cpp',
'environment.cpp',
+ 'helpers/capture.cpp',
'main.cpp',
- 'simple_capture.cpp',
- 'capture_test.cpp',
+ 'tests/capture_test.cpp',
+])
+
+lc_compliance_includes = ([
+ include_directories('.'),
+ include_directories('helpers/')
])
lc_compliance = executable('lc-compliance', lc_compliance_sources,
cpp_args : [ '-fexceptions' ],
+ link_with : apps_lib,
dependencies : [
libatomic,
libcamera_public,
libevent,
libgtest,
],
- install : true)
+ include_directories : lc_compliance_includes,
+ install : true,
+ install_tag : 'bin-devel')
diff --git a/src/lc-compliance/capture_test.cpp b/src/apps/lc-compliance/tests/capture_test.cpp
index 52578207..ad3a1da2 100644
--- a/src/lc-compliance/capture_test.cpp
+++ b/src/apps/lc-compliance/tests/capture_test.cpp
@@ -3,20 +3,26 @@
* Copyright (C) 2020, Google Inc.
* Copyright (C) 2021, Collabora Ltd.
*
- * capture_test.cpp - Test camera capture
+ * Test camera capture
*/
+#include "capture.h"
+
#include <iostream>
#include <gtest/gtest.h>
#include "environment.h"
-#include "simple_capture.h"
using namespace libcamera;
const std::vector<int> NUMREQUESTS = { 1, 2, 3, 5, 8, 13, 21, 34, 55, 89 };
-const std::vector<StreamRole> ROLES = { Raw, StillCapture, VideoRecording, Viewfinder };
+const std::vector<StreamRole> ROLES = {
+ StreamRole::Raw,
+ StreamRole::StillCapture,
+ StreamRole::VideoRecording,
+ StreamRole::Viewfinder
+};
class SingleStream : public testing::TestWithParam<std::tuple<StreamRole, int>>
{
@@ -54,10 +60,12 @@ void SingleStream::TearDown()
std::string SingleStream::nameParameters(const testing::TestParamInfo<SingleStream::ParamType> &info)
{
- std::map<StreamRole, std::string> rolesMap = { { Raw, "Raw" },
- { StillCapture, "StillCapture" },
- { VideoRecording, "VideoRecording" },
- { Viewfinder, "Viewfinder" } };
+ std::map<StreamRole, std::string> rolesMap = {
+ { StreamRole::Raw, "Raw" },
+ { StreamRole::StillCapture, "StillCapture" },
+ { StreamRole::VideoRecording, "VideoRecording" },
+ { StreamRole::Viewfinder, "Viewfinder" }
+ };
std::string roleName = rolesMap[std::get<0>(info.param)];
std::string numRequestsName = std::to_string(std::get<1>(info.param));
@@ -76,7 +84,7 @@ TEST_P(SingleStream, Capture)
{
auto [role, numRequests] = GetParam();
- SimpleCaptureBalanced capture(camera_);
+ CaptureBalanced capture(camera_);
capture.configure(role);
@@ -95,7 +103,7 @@ TEST_P(SingleStream, CaptureStartStop)
auto [role, numRequests] = GetParam();
unsigned int numRepeats = 3;
- SimpleCaptureBalanced capture(camera_);
+ CaptureBalanced capture(camera_);
capture.configure(role);
@@ -114,7 +122,7 @@ TEST_P(SingleStream, UnbalancedStop)
{
auto [role, numRequests] = GetParam();
- SimpleCaptureUnbalanced capture(camera_);
+ CaptureUnbalanced capture(camera_);
capture.configure(role);
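The hunks above don't show how the SingleStream suite is instantiated over ROLES and NUMREQUESTS. A GoogleTest instantiation compatible with the fixture and nameParameters() shown here would look like the following sketch (the suite name CaptureTests is assumed, not taken from this diff):

INSTANTIATE_TEST_SUITE_P(CaptureTests,
			 SingleStream,
			 testing::Combine(testing::ValuesIn(ROLES),
					  testing::ValuesIn(NUMREQUESTS)),
			 SingleStream::nameParameters);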
diff --git a/src/apps/meson.build b/src/apps/meson.build
new file mode 100644
index 00000000..af632b9a
--- /dev/null
+++ b/src/apps/meson.build
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: CC0-1.0
+
+opt_cam = get_option('cam')
+opt_lc_compliance = get_option('lc-compliance')
+
+# libevent is needed by cam and lc-compliance. As they are both feature options,
+# they can't be combined with simple boolean logic.
+libevent = dependency('libevent_pthreads', required : opt_cam)
+if not libevent.found()
+ libevent = dependency('libevent_pthreads', required : opt_lc_compliance)
+endif
+
+libtiff = dependency('libtiff-4', required : false)
+
+subdir('common')
+
+subdir('lc-compliance')
+
+subdir('cam')
+subdir('qcam')
+
+subdir('ipa-verify')
diff --git a/src/qcam/assets/feathericons/activity.svg b/src/apps/qcam/assets/feathericons/activity.svg
index 669a57a7..669a57a7 100644
--- a/src/qcam/assets/feathericons/activity.svg
+++ b/src/apps/qcam/assets/feathericons/activity.svg
diff --git a/src/qcam/assets/feathericons/airplay.svg b/src/apps/qcam/assets/feathericons/airplay.svg
index 7ce73022..7ce73022 100644
--- a/src/qcam/assets/feathericons/airplay.svg
+++ b/src/apps/qcam/assets/feathericons/airplay.svg
diff --git a/src/qcam/assets/feathericons/alert-circle.svg b/src/apps/qcam/assets/feathericons/alert-circle.svg
index 8d02b7d1..8d02b7d1 100644
--- a/src/qcam/assets/feathericons/alert-circle.svg
+++ b/src/apps/qcam/assets/feathericons/alert-circle.svg
diff --git a/src/qcam/assets/feathericons/alert-octagon.svg b/src/apps/qcam/assets/feathericons/alert-octagon.svg
index de9b03f2..de9b03f2 100644
--- a/src/qcam/assets/feathericons/alert-octagon.svg
+++ b/src/apps/qcam/assets/feathericons/alert-octagon.svg
diff --git a/src/qcam/assets/feathericons/alert-triangle.svg b/src/apps/qcam/assets/feathericons/alert-triangle.svg
index 6dcb0963..6dcb0963 100644
--- a/src/qcam/assets/feathericons/alert-triangle.svg
+++ b/src/apps/qcam/assets/feathericons/alert-triangle.svg
diff --git a/src/qcam/assets/feathericons/align-center.svg b/src/apps/qcam/assets/feathericons/align-center.svg
index 5b8842ea..5b8842ea 100644
--- a/src/qcam/assets/feathericons/align-center.svg
+++ b/src/apps/qcam/assets/feathericons/align-center.svg
diff --git a/src/qcam/assets/feathericons/align-justify.svg b/src/apps/qcam/assets/feathericons/align-justify.svg
index 0539876f..0539876f 100644
--- a/src/qcam/assets/feathericons/align-justify.svg
+++ b/src/apps/qcam/assets/feathericons/align-justify.svg
diff --git a/src/qcam/assets/feathericons/align-left.svg b/src/apps/qcam/assets/feathericons/align-left.svg
index 9ac852a5..9ac852a5 100644
--- a/src/qcam/assets/feathericons/align-left.svg
+++ b/src/apps/qcam/assets/feathericons/align-left.svg
diff --git a/src/qcam/assets/feathericons/align-right.svg b/src/apps/qcam/assets/feathericons/align-right.svg
index ef139ffa..ef139ffa 100644
--- a/src/qcam/assets/feathericons/align-right.svg
+++ b/src/apps/qcam/assets/feathericons/align-right.svg
diff --git a/src/qcam/assets/feathericons/anchor.svg b/src/apps/qcam/assets/feathericons/anchor.svg
index e01627a3..e01627a3 100644
--- a/src/qcam/assets/feathericons/anchor.svg
+++ b/src/apps/qcam/assets/feathericons/anchor.svg
diff --git a/src/qcam/assets/feathericons/aperture.svg b/src/apps/qcam/assets/feathericons/aperture.svg
index 9936e868..9936e868 100644
--- a/src/qcam/assets/feathericons/aperture.svg
+++ b/src/apps/qcam/assets/feathericons/aperture.svg
diff --git a/src/qcam/assets/feathericons/archive.svg b/src/apps/qcam/assets/feathericons/archive.svg
index 428882c8..428882c8 100644
--- a/src/qcam/assets/feathericons/archive.svg
+++ b/src/apps/qcam/assets/feathericons/archive.svg
diff --git a/src/qcam/assets/feathericons/arrow-down-circle.svg b/src/apps/qcam/assets/feathericons/arrow-down-circle.svg
index 3238091b..3238091b 100644
--- a/src/qcam/assets/feathericons/arrow-down-circle.svg
+++ b/src/apps/qcam/assets/feathericons/arrow-down-circle.svg
diff --git a/src/qcam/assets/feathericons/arrow-down-left.svg b/src/apps/qcam/assets/feathericons/arrow-down-left.svg
index 72483584..72483584 100644
--- a/src/qcam/assets/feathericons/arrow-down-left.svg
+++ b/src/apps/qcam/assets/feathericons/arrow-down-left.svg
diff --git a/src/qcam/assets/feathericons/arrow-down-right.svg b/src/apps/qcam/assets/feathericons/arrow-down-right.svg
index 81d9822b..81d9822b 100644
--- a/src/qcam/assets/feathericons/arrow-down-right.svg
+++ b/src/apps/qcam/assets/feathericons/arrow-down-right.svg
diff --git a/src/qcam/assets/feathericons/arrow-down.svg b/src/apps/qcam/assets/feathericons/arrow-down.svg
index 4f84f627..4f84f627 100644
--- a/src/qcam/assets/feathericons/arrow-down.svg
+++ b/src/apps/qcam/assets/feathericons/arrow-down.svg
diff --git a/src/qcam/assets/feathericons/arrow-left-circle.svg b/src/apps/qcam/assets/feathericons/arrow-left-circle.svg
index 3b19ff8a..3b19ff8a 100644
--- a/src/qcam/assets/feathericons/arrow-left-circle.svg
+++ b/src/apps/qcam/assets/feathericons/arrow-left-circle.svg
diff --git a/src/qcam/assets/feathericons/arrow-left.svg b/src/apps/qcam/assets/feathericons/arrow-left.svg
index a5058fc7..a5058fc7 100644
--- a/src/qcam/assets/feathericons/arrow-left.svg
+++ b/src/apps/qcam/assets/feathericons/arrow-left.svg
diff --git a/src/qcam/assets/feathericons/arrow-right-circle.svg b/src/apps/qcam/assets/feathericons/arrow-right-circle.svg
index ff01dd58..ff01dd58 100644
--- a/src/qcam/assets/feathericons/arrow-right-circle.svg
+++ b/src/apps/qcam/assets/feathericons/arrow-right-circle.svg
diff --git a/src/qcam/assets/feathericons/arrow-right.svg b/src/apps/qcam/assets/feathericons/arrow-right.svg
index 939b57c5..939b57c5 100644
--- a/src/qcam/assets/feathericons/arrow-right.svg
+++ b/src/apps/qcam/assets/feathericons/arrow-right.svg
diff --git a/src/qcam/assets/feathericons/arrow-up-circle.svg b/src/apps/qcam/assets/feathericons/arrow-up-circle.svg
index 044a75d3..044a75d3 100644
--- a/src/qcam/assets/feathericons/arrow-up-circle.svg
+++ b/src/apps/qcam/assets/feathericons/arrow-up-circle.svg
diff --git a/src/qcam/assets/feathericons/arrow-up-left.svg b/src/apps/qcam/assets/feathericons/arrow-up-left.svg
index cea55e87..cea55e87 100644
--- a/src/qcam/assets/feathericons/arrow-up-left.svg
+++ b/src/apps/qcam/assets/feathericons/arrow-up-left.svg
diff --git a/src/qcam/assets/feathericons/arrow-up-right.svg b/src/apps/qcam/assets/feathericons/arrow-up-right.svg
index 95678e00..95678e00 100644
--- a/src/qcam/assets/feathericons/arrow-up-right.svg
+++ b/src/apps/qcam/assets/feathericons/arrow-up-right.svg
diff --git a/src/qcam/assets/feathericons/arrow-up.svg b/src/apps/qcam/assets/feathericons/arrow-up.svg
index 16b13aba..16b13aba 100644
--- a/src/qcam/assets/feathericons/arrow-up.svg
+++ b/src/apps/qcam/assets/feathericons/arrow-up.svg
diff --git a/src/qcam/assets/feathericons/at-sign.svg b/src/apps/qcam/assets/feathericons/at-sign.svg
index 5a5e5d0d..5a5e5d0d 100644
--- a/src/qcam/assets/feathericons/at-sign.svg
+++ b/src/apps/qcam/assets/feathericons/at-sign.svg
diff --git a/src/qcam/assets/feathericons/award.svg b/src/apps/qcam/assets/feathericons/award.svg
index be70d5a1..be70d5a1 100644
--- a/src/qcam/assets/feathericons/award.svg
+++ b/src/apps/qcam/assets/feathericons/award.svg
diff --git a/src/qcam/assets/feathericons/bar-chart-2.svg b/src/apps/qcam/assets/feathericons/bar-chart-2.svg
index 864167a6..864167a6 100644
--- a/src/qcam/assets/feathericons/bar-chart-2.svg
+++ b/src/apps/qcam/assets/feathericons/bar-chart-2.svg
diff --git a/src/qcam/assets/feathericons/bar-chart.svg b/src/apps/qcam/assets/feathericons/bar-chart.svg
index 074d7c1a..074d7c1a 100644
--- a/src/qcam/assets/feathericons/bar-chart.svg
+++ b/src/apps/qcam/assets/feathericons/bar-chart.svg
diff --git a/src/qcam/assets/feathericons/battery-charging.svg b/src/apps/qcam/assets/feathericons/battery-charging.svg
index 644cb59c..644cb59c 100644
--- a/src/qcam/assets/feathericons/battery-charging.svg
+++ b/src/apps/qcam/assets/feathericons/battery-charging.svg
diff --git a/src/qcam/assets/feathericons/battery.svg b/src/apps/qcam/assets/feathericons/battery.svg
index 7fe87710..7fe87710 100644
--- a/src/qcam/assets/feathericons/battery.svg
+++ b/src/apps/qcam/assets/feathericons/battery.svg
diff --git a/src/qcam/assets/feathericons/bell-off.svg b/src/apps/qcam/assets/feathericons/bell-off.svg
index 4b07c848..4b07c848 100644
--- a/src/qcam/assets/feathericons/bell-off.svg
+++ b/src/apps/qcam/assets/feathericons/bell-off.svg
diff --git a/src/qcam/assets/feathericons/bell.svg b/src/apps/qcam/assets/feathericons/bell.svg
index bba561c1..bba561c1 100644
--- a/src/qcam/assets/feathericons/bell.svg
+++ b/src/apps/qcam/assets/feathericons/bell.svg
diff --git a/src/qcam/assets/feathericons/bluetooth.svg b/src/apps/qcam/assets/feathericons/bluetooth.svg
index cebed7b1..cebed7b1 100644
--- a/src/qcam/assets/feathericons/bluetooth.svg
+++ b/src/apps/qcam/assets/feathericons/bluetooth.svg
diff --git a/src/qcam/assets/feathericons/bold.svg b/src/apps/qcam/assets/feathericons/bold.svg
index d1a4efd3..d1a4efd3 100644
--- a/src/qcam/assets/feathericons/bold.svg
+++ b/src/apps/qcam/assets/feathericons/bold.svg
diff --git a/src/qcam/assets/feathericons/book-open.svg b/src/apps/qcam/assets/feathericons/book-open.svg
index 5e0ca0ab..5e0ca0ab 100644
--- a/src/qcam/assets/feathericons/book-open.svg
+++ b/src/apps/qcam/assets/feathericons/book-open.svg
diff --git a/src/qcam/assets/feathericons/book.svg b/src/apps/qcam/assets/feathericons/book.svg
index 12ffcbc4..12ffcbc4 100644
--- a/src/qcam/assets/feathericons/book.svg
+++ b/src/apps/qcam/assets/feathericons/book.svg
diff --git a/src/qcam/assets/feathericons/bookmark.svg b/src/apps/qcam/assets/feathericons/bookmark.svg
index 2239cc58..2239cc58 100644
--- a/src/qcam/assets/feathericons/bookmark.svg
+++ b/src/apps/qcam/assets/feathericons/bookmark.svg
diff --git a/src/qcam/assets/feathericons/box.svg b/src/apps/qcam/assets/feathericons/box.svg
index d89be30f..d89be30f 100644
--- a/src/qcam/assets/feathericons/box.svg
+++ b/src/apps/qcam/assets/feathericons/box.svg
diff --git a/src/qcam/assets/feathericons/briefcase.svg b/src/apps/qcam/assets/feathericons/briefcase.svg
index e3af0506..e3af0506 100644
--- a/src/qcam/assets/feathericons/briefcase.svg
+++ b/src/apps/qcam/assets/feathericons/briefcase.svg
diff --git a/src/qcam/assets/feathericons/calendar.svg b/src/apps/qcam/assets/feathericons/calendar.svg
index 6c7fd870..6c7fd870 100644
--- a/src/qcam/assets/feathericons/calendar.svg
+++ b/src/apps/qcam/assets/feathericons/calendar.svg
diff --git a/src/qcam/assets/feathericons/camera-off.svg b/src/apps/qcam/assets/feathericons/camera-off.svg
index daa3e25f..daa3e25f 100644
--- a/src/qcam/assets/feathericons/camera-off.svg
+++ b/src/apps/qcam/assets/feathericons/camera-off.svg
diff --git a/src/qcam/assets/feathericons/camera.svg b/src/apps/qcam/assets/feathericons/camera.svg
index 0e7f0603..0e7f0603 100644
--- a/src/qcam/assets/feathericons/camera.svg
+++ b/src/apps/qcam/assets/feathericons/camera.svg
diff --git a/src/qcam/assets/feathericons/cast.svg b/src/apps/qcam/assets/feathericons/cast.svg
index 63c954d9..63c954d9 100644
--- a/src/qcam/assets/feathericons/cast.svg
+++ b/src/apps/qcam/assets/feathericons/cast.svg
diff --git a/src/qcam/assets/feathericons/check-circle.svg b/src/apps/qcam/assets/feathericons/check-circle.svg
index f2f4fd1a..f2f4fd1a 100644
--- a/src/qcam/assets/feathericons/check-circle.svg
+++ b/src/apps/qcam/assets/feathericons/check-circle.svg
diff --git a/src/qcam/assets/feathericons/check-square.svg b/src/apps/qcam/assets/feathericons/check-square.svg
index 72ab7a80..72ab7a80 100644
--- a/src/qcam/assets/feathericons/check-square.svg
+++ b/src/apps/qcam/assets/feathericons/check-square.svg
diff --git a/src/qcam/assets/feathericons/check.svg b/src/apps/qcam/assets/feathericons/check.svg
index 1c209899..1c209899 100644
--- a/src/qcam/assets/feathericons/check.svg
+++ b/src/apps/qcam/assets/feathericons/check.svg
diff --git a/src/qcam/assets/feathericons/chevron-down.svg b/src/apps/qcam/assets/feathericons/chevron-down.svg
index 278c6a31..278c6a31 100644
--- a/src/qcam/assets/feathericons/chevron-down.svg
+++ b/src/apps/qcam/assets/feathericons/chevron-down.svg
diff --git a/src/qcam/assets/feathericons/chevron-left.svg b/src/apps/qcam/assets/feathericons/chevron-left.svg
index 747d46d9..747d46d9 100644
--- a/src/qcam/assets/feathericons/chevron-left.svg
+++ b/src/apps/qcam/assets/feathericons/chevron-left.svg
diff --git a/src/qcam/assets/feathericons/chevron-right.svg b/src/apps/qcam/assets/feathericons/chevron-right.svg
index 258de414..258de414 100644
--- a/src/qcam/assets/feathericons/chevron-right.svg
+++ b/src/apps/qcam/assets/feathericons/chevron-right.svg
diff --git a/src/qcam/assets/feathericons/chevron-up.svg b/src/apps/qcam/assets/feathericons/chevron-up.svg
index 4eb5ecc3..4eb5ecc3 100644
--- a/src/qcam/assets/feathericons/chevron-up.svg
+++ b/src/apps/qcam/assets/feathericons/chevron-up.svg
diff --git a/src/qcam/assets/feathericons/chevrons-down.svg b/src/apps/qcam/assets/feathericons/chevrons-down.svg
index e67ef2fb..e67ef2fb 100644
--- a/src/qcam/assets/feathericons/chevrons-down.svg
+++ b/src/apps/qcam/assets/feathericons/chevrons-down.svg
diff --git a/src/qcam/assets/feathericons/chevrons-left.svg b/src/apps/qcam/assets/feathericons/chevrons-left.svg
index c32e3983..c32e3983 100644
--- a/src/qcam/assets/feathericons/chevrons-left.svg
+++ b/src/apps/qcam/assets/feathericons/chevrons-left.svg
diff --git a/src/qcam/assets/feathericons/chevrons-right.svg b/src/apps/qcam/assets/feathericons/chevrons-right.svg
index f5068145..f5068145 100644
--- a/src/qcam/assets/feathericons/chevrons-right.svg
+++ b/src/apps/qcam/assets/feathericons/chevrons-right.svg
diff --git a/src/qcam/assets/feathericons/chevrons-up.svg b/src/apps/qcam/assets/feathericons/chevrons-up.svg
index 0eaf5183..0eaf5183 100644
--- a/src/qcam/assets/feathericons/chevrons-up.svg
+++ b/src/apps/qcam/assets/feathericons/chevrons-up.svg
diff --git a/src/qcam/assets/feathericons/chrome.svg b/src/apps/qcam/assets/feathericons/chrome.svg
index 9189815e..9189815e 100644
--- a/src/qcam/assets/feathericons/chrome.svg
+++ b/src/apps/qcam/assets/feathericons/chrome.svg
diff --git a/src/qcam/assets/feathericons/circle.svg b/src/apps/qcam/assets/feathericons/circle.svg
index b0090882..b0090882 100644
--- a/src/qcam/assets/feathericons/circle.svg
+++ b/src/apps/qcam/assets/feathericons/circle.svg
diff --git a/src/qcam/assets/feathericons/clipboard.svg b/src/apps/qcam/assets/feathericons/clipboard.svg
index ccee454d..ccee454d 100644
--- a/src/qcam/assets/feathericons/clipboard.svg
+++ b/src/apps/qcam/assets/feathericons/clipboard.svg
diff --git a/src/qcam/assets/feathericons/clock.svg b/src/apps/qcam/assets/feathericons/clock.svg
index ea3f5e50..ea3f5e50 100644
--- a/src/qcam/assets/feathericons/clock.svg
+++ b/src/apps/qcam/assets/feathericons/clock.svg
diff --git a/src/qcam/assets/feathericons/cloud-drizzle.svg b/src/apps/qcam/assets/feathericons/cloud-drizzle.svg
index 13af6bb5..13af6bb5 100644
--- a/src/qcam/assets/feathericons/cloud-drizzle.svg
+++ b/src/apps/qcam/assets/feathericons/cloud-drizzle.svg
diff --git a/src/qcam/assets/feathericons/cloud-lightning.svg b/src/apps/qcam/assets/feathericons/cloud-lightning.svg
index 32d154cc..32d154cc 100644
--- a/src/qcam/assets/feathericons/cloud-lightning.svg
+++ b/src/apps/qcam/assets/feathericons/cloud-lightning.svg
diff --git a/src/qcam/assets/feathericons/cloud-off.svg b/src/apps/qcam/assets/feathericons/cloud-off.svg
index 1e1e7d60..1e1e7d60 100644
--- a/src/qcam/assets/feathericons/cloud-off.svg
+++ b/src/apps/qcam/assets/feathericons/cloud-off.svg
diff --git a/src/qcam/assets/feathericons/cloud-rain.svg b/src/apps/qcam/assets/feathericons/cloud-rain.svg
index 3e0b85b0..3e0b85b0 100644
--- a/src/qcam/assets/feathericons/cloud-rain.svg
+++ b/src/apps/qcam/assets/feathericons/cloud-rain.svg
diff --git a/src/qcam/assets/feathericons/cloud-snow.svg b/src/apps/qcam/assets/feathericons/cloud-snow.svg
index e4eb8207..e4eb8207 100644
--- a/src/qcam/assets/feathericons/cloud-snow.svg
+++ b/src/apps/qcam/assets/feathericons/cloud-snow.svg
diff --git a/src/qcam/assets/feathericons/cloud.svg b/src/apps/qcam/assets/feathericons/cloud.svg
index 0ee0c632..0ee0c632 100644
--- a/src/qcam/assets/feathericons/cloud.svg
+++ b/src/apps/qcam/assets/feathericons/cloud.svg
diff --git a/src/qcam/assets/feathericons/code.svg b/src/apps/qcam/assets/feathericons/code.svg
index c4954b55..c4954b55 100644
--- a/src/qcam/assets/feathericons/code.svg
+++ b/src/apps/qcam/assets/feathericons/code.svg
diff --git a/src/qcam/assets/feathericons/codepen.svg b/src/apps/qcam/assets/feathericons/codepen.svg
index ab2a815a..ab2a815a 100644
--- a/src/qcam/assets/feathericons/codepen.svg
+++ b/src/apps/qcam/assets/feathericons/codepen.svg
diff --git a/src/qcam/assets/feathericons/codesandbox.svg b/src/apps/qcam/assets/feathericons/codesandbox.svg
index 49848f52..49848f52 100644
--- a/src/qcam/assets/feathericons/codesandbox.svg
+++ b/src/apps/qcam/assets/feathericons/codesandbox.svg
diff --git a/src/qcam/assets/feathericons/coffee.svg b/src/apps/qcam/assets/feathericons/coffee.svg
index 32905e52..32905e52 100644
--- a/src/qcam/assets/feathericons/coffee.svg
+++ b/src/apps/qcam/assets/feathericons/coffee.svg
diff --git a/src/qcam/assets/feathericons/columns.svg b/src/apps/qcam/assets/feathericons/columns.svg
index d264b557..d264b557 100644
--- a/src/qcam/assets/feathericons/columns.svg
+++ b/src/apps/qcam/assets/feathericons/columns.svg
diff --git a/src/qcam/assets/feathericons/command.svg b/src/apps/qcam/assets/feathericons/command.svg
index 93f554c3..93f554c3 100644
--- a/src/qcam/assets/feathericons/command.svg
+++ b/src/apps/qcam/assets/feathericons/command.svg
diff --git a/src/qcam/assets/feathericons/compass.svg b/src/apps/qcam/assets/feathericons/compass.svg
index 32962608..32962608 100644
--- a/src/qcam/assets/feathericons/compass.svg
+++ b/src/apps/qcam/assets/feathericons/compass.svg
diff --git a/src/qcam/assets/feathericons/copy.svg b/src/apps/qcam/assets/feathericons/copy.svg
index 4e0b09f1..4e0b09f1 100644
--- a/src/qcam/assets/feathericons/copy.svg
+++ b/src/apps/qcam/assets/feathericons/copy.svg
diff --git a/src/qcam/assets/feathericons/corner-down-left.svg b/src/apps/qcam/assets/feathericons/corner-down-left.svg
index 9fffb3e9..9fffb3e9 100644
--- a/src/qcam/assets/feathericons/corner-down-left.svg
+++ b/src/apps/qcam/assets/feathericons/corner-down-left.svg
diff --git a/src/qcam/assets/feathericons/corner-down-right.svg b/src/apps/qcam/assets/feathericons/corner-down-right.svg
index b27d408d..b27d408d 100644
--- a/src/qcam/assets/feathericons/corner-down-right.svg
+++ b/src/apps/qcam/assets/feathericons/corner-down-right.svg
diff --git a/src/qcam/assets/feathericons/corner-left-down.svg b/src/apps/qcam/assets/feathericons/corner-left-down.svg
index 24b8375c..24b8375c 100644
--- a/src/qcam/assets/feathericons/corner-left-down.svg
+++ b/src/apps/qcam/assets/feathericons/corner-left-down.svg
diff --git a/src/qcam/assets/feathericons/corner-left-up.svg b/src/apps/qcam/assets/feathericons/corner-left-up.svg
index e54527cd..e54527cd 100644
--- a/src/qcam/assets/feathericons/corner-left-up.svg
+++ b/src/apps/qcam/assets/feathericons/corner-left-up.svg
diff --git a/src/qcam/assets/feathericons/corner-right-down.svg b/src/apps/qcam/assets/feathericons/corner-right-down.svg
index a49e6d6c..a49e6d6c 100644
--- a/src/qcam/assets/feathericons/corner-right-down.svg
+++ b/src/apps/qcam/assets/feathericons/corner-right-down.svg
diff --git a/src/qcam/assets/feathericons/corner-right-up.svg b/src/apps/qcam/assets/feathericons/corner-right-up.svg
index a5c5dce5..a5c5dce5 100644
--- a/src/qcam/assets/feathericons/corner-right-up.svg
+++ b/src/apps/qcam/assets/feathericons/corner-right-up.svg
diff --git a/src/qcam/assets/feathericons/corner-up-left.svg b/src/apps/qcam/assets/feathericons/corner-up-left.svg
index 0a1ffd61..0a1ffd61 100644
--- a/src/qcam/assets/feathericons/corner-up-left.svg
+++ b/src/apps/qcam/assets/feathericons/corner-up-left.svg
diff --git a/src/qcam/assets/feathericons/corner-up-right.svg b/src/apps/qcam/assets/feathericons/corner-up-right.svg
index 0b8f961b..0b8f961b 100644
--- a/src/qcam/assets/feathericons/corner-up-right.svg
+++ b/src/apps/qcam/assets/feathericons/corner-up-right.svg
diff --git a/src/qcam/assets/feathericons/cpu.svg b/src/apps/qcam/assets/feathericons/cpu.svg
index 2ed16ef7..2ed16ef7 100644
--- a/src/qcam/assets/feathericons/cpu.svg
+++ b/src/apps/qcam/assets/feathericons/cpu.svg
diff --git a/src/qcam/assets/feathericons/credit-card.svg b/src/apps/qcam/assets/feathericons/credit-card.svg
index 1b7fd029..1b7fd029 100644
--- a/src/qcam/assets/feathericons/credit-card.svg
+++ b/src/apps/qcam/assets/feathericons/credit-card.svg
diff --git a/src/qcam/assets/feathericons/crop.svg b/src/apps/qcam/assets/feathericons/crop.svg
index ffbfd045..ffbfd045 100644
--- a/src/qcam/assets/feathericons/crop.svg
+++ b/src/apps/qcam/assets/feathericons/crop.svg
diff --git a/src/qcam/assets/feathericons/crosshair.svg b/src/apps/qcam/assets/feathericons/crosshair.svg
index ba394015..ba394015 100644
--- a/src/qcam/assets/feathericons/crosshair.svg
+++ b/src/apps/qcam/assets/feathericons/crosshair.svg
diff --git a/src/qcam/assets/feathericons/database.svg b/src/apps/qcam/assets/feathericons/database.svg
index c296fbcf..c296fbcf 100644
--- a/src/qcam/assets/feathericons/database.svg
+++ b/src/apps/qcam/assets/feathericons/database.svg
diff --git a/src/qcam/assets/feathericons/delete.svg b/src/apps/qcam/assets/feathericons/delete.svg
index 8c6074b9..8c6074b9 100644
--- a/src/qcam/assets/feathericons/delete.svg
+++ b/src/apps/qcam/assets/feathericons/delete.svg
diff --git a/src/qcam/assets/feathericons/disc.svg b/src/apps/qcam/assets/feathericons/disc.svg
index 2595b444..2595b444 100644
--- a/src/qcam/assets/feathericons/disc.svg
+++ b/src/apps/qcam/assets/feathericons/disc.svg
diff --git a/src/qcam/assets/feathericons/dollar-sign.svg b/src/apps/qcam/assets/feathericons/dollar-sign.svg
index 1a124d26..1a124d26 100644
--- a/src/qcam/assets/feathericons/dollar-sign.svg
+++ b/src/apps/qcam/assets/feathericons/dollar-sign.svg
diff --git a/src/qcam/assets/feathericons/download-cloud.svg b/src/apps/qcam/assets/feathericons/download-cloud.svg
index f3126fc3..f3126fc3 100644
--- a/src/qcam/assets/feathericons/download-cloud.svg
+++ b/src/apps/qcam/assets/feathericons/download-cloud.svg
diff --git a/src/qcam/assets/feathericons/download.svg b/src/apps/qcam/assets/feathericons/download.svg
index 76767a92..76767a92 100644
--- a/src/qcam/assets/feathericons/download.svg
+++ b/src/apps/qcam/assets/feathericons/download.svg
diff --git a/src/qcam/assets/feathericons/droplet.svg b/src/apps/qcam/assets/feathericons/droplet.svg
index ca093014..ca093014 100644
--- a/src/qcam/assets/feathericons/droplet.svg
+++ b/src/apps/qcam/assets/feathericons/droplet.svg
diff --git a/src/qcam/assets/feathericons/edit-2.svg b/src/apps/qcam/assets/feathericons/edit-2.svg
index 06830c9d..06830c9d 100644
--- a/src/qcam/assets/feathericons/edit-2.svg
+++ b/src/apps/qcam/assets/feathericons/edit-2.svg
diff --git a/src/qcam/assets/feathericons/edit-3.svg b/src/apps/qcam/assets/feathericons/edit-3.svg
index d728efcc..d728efcc 100644
--- a/src/qcam/assets/feathericons/edit-3.svg
+++ b/src/apps/qcam/assets/feathericons/edit-3.svg
diff --git a/src/qcam/assets/feathericons/edit.svg b/src/apps/qcam/assets/feathericons/edit.svg
index ec7b4ca2..ec7b4ca2 100644
--- a/src/qcam/assets/feathericons/edit.svg
+++ b/src/apps/qcam/assets/feathericons/edit.svg
diff --git a/src/qcam/assets/feathericons/external-link.svg b/src/apps/qcam/assets/feathericons/external-link.svg
index 6236df3e..6236df3e 100644
--- a/src/qcam/assets/feathericons/external-link.svg
+++ b/src/apps/qcam/assets/feathericons/external-link.svg
diff --git a/src/qcam/assets/feathericons/eye-off.svg b/src/apps/qcam/assets/feathericons/eye-off.svg
index 77c54cb4..77c54cb4 100644
--- a/src/qcam/assets/feathericons/eye-off.svg
+++ b/src/apps/qcam/assets/feathericons/eye-off.svg
diff --git a/src/qcam/assets/feathericons/eye.svg b/src/apps/qcam/assets/feathericons/eye.svg
index 9cde2437..9cde2437 100644
--- a/src/qcam/assets/feathericons/eye.svg
+++ b/src/apps/qcam/assets/feathericons/eye.svg
diff --git a/src/qcam/assets/feathericons/facebook.svg b/src/apps/qcam/assets/feathericons/facebook.svg
index 2570f56a..2570f56a 100644
--- a/src/qcam/assets/feathericons/facebook.svg
+++ b/src/apps/qcam/assets/feathericons/facebook.svg
diff --git a/src/qcam/assets/feathericons/fast-forward.svg b/src/apps/qcam/assets/feathericons/fast-forward.svg
index fa39877a..fa39877a 100644
--- a/src/qcam/assets/feathericons/fast-forward.svg
+++ b/src/apps/qcam/assets/feathericons/fast-forward.svg
diff --git a/src/qcam/assets/feathericons/feather.svg b/src/apps/qcam/assets/feathericons/feather.svg
index ac3b868d..ac3b868d 100644
--- a/src/qcam/assets/feathericons/feather.svg
+++ b/src/apps/qcam/assets/feathericons/feather.svg
diff --git a/src/qcam/assets/feathericons/feathericons.qrc b/src/apps/qcam/assets/feathericons/feathericons.qrc
index c5302040..c5302040 100644
--- a/src/qcam/assets/feathericons/feathericons.qrc
+++ b/src/apps/qcam/assets/feathericons/feathericons.qrc
diff --git a/src/qcam/assets/feathericons/figma.svg b/src/apps/qcam/assets/feathericons/figma.svg
index 66fd2178..66fd2178 100644
--- a/src/qcam/assets/feathericons/figma.svg
+++ b/src/apps/qcam/assets/feathericons/figma.svg
diff --git a/src/qcam/assets/feathericons/file-minus.svg b/src/apps/qcam/assets/feathericons/file-minus.svg
index 345756ef..345756ef 100644
--- a/src/qcam/assets/feathericons/file-minus.svg
+++ b/src/apps/qcam/assets/feathericons/file-minus.svg
diff --git a/src/qcam/assets/feathericons/file-plus.svg b/src/apps/qcam/assets/feathericons/file-plus.svg
index eed12004..eed12004 100644
--- a/src/qcam/assets/feathericons/file-plus.svg
+++ b/src/apps/qcam/assets/feathericons/file-plus.svg
diff --git a/src/qcam/assets/feathericons/file-text.svg b/src/apps/qcam/assets/feathericons/file-text.svg
index 4197ddd4..4197ddd4 100644
--- a/src/qcam/assets/feathericons/file-text.svg
+++ b/src/apps/qcam/assets/feathericons/file-text.svg
diff --git a/src/qcam/assets/feathericons/file.svg b/src/apps/qcam/assets/feathericons/file.svg
index 378519ab..378519ab 100644
--- a/src/qcam/assets/feathericons/file.svg
+++ b/src/apps/qcam/assets/feathericons/file.svg
diff --git a/src/qcam/assets/feathericons/film.svg b/src/apps/qcam/assets/feathericons/film.svg
index ac46360d..ac46360d 100644
--- a/src/qcam/assets/feathericons/film.svg
+++ b/src/apps/qcam/assets/feathericons/film.svg
diff --git a/src/qcam/assets/feathericons/filter.svg b/src/apps/qcam/assets/feathericons/filter.svg
index 38a47e04..38a47e04 100644
--- a/src/qcam/assets/feathericons/filter.svg
+++ b/src/apps/qcam/assets/feathericons/filter.svg
diff --git a/src/qcam/assets/feathericons/flag.svg b/src/apps/qcam/assets/feathericons/flag.svg
index 037737cb..037737cb 100644
--- a/src/qcam/assets/feathericons/flag.svg
+++ b/src/apps/qcam/assets/feathericons/flag.svg
diff --git a/src/qcam/assets/feathericons/folder-minus.svg b/src/apps/qcam/assets/feathericons/folder-minus.svg
index d5b7af65..d5b7af65 100644
--- a/src/qcam/assets/feathericons/folder-minus.svg
+++ b/src/apps/qcam/assets/feathericons/folder-minus.svg
diff --git a/src/qcam/assets/feathericons/folder-plus.svg b/src/apps/qcam/assets/feathericons/folder-plus.svg
index 898f2fc9..898f2fc9 100644
--- a/src/qcam/assets/feathericons/folder-plus.svg
+++ b/src/apps/qcam/assets/feathericons/folder-plus.svg
diff --git a/src/qcam/assets/feathericons/folder.svg b/src/apps/qcam/assets/feathericons/folder.svg
index 134458b9..134458b9 100644
--- a/src/qcam/assets/feathericons/folder.svg
+++ b/src/apps/qcam/assets/feathericons/folder.svg
diff --git a/src/qcam/assets/feathericons/framer.svg b/src/apps/qcam/assets/feathericons/framer.svg
index 3e663478..3e663478 100644
--- a/src/qcam/assets/feathericons/framer.svg
+++ b/src/apps/qcam/assets/feathericons/framer.svg
diff --git a/src/qcam/assets/feathericons/frown.svg b/src/apps/qcam/assets/feathericons/frown.svg
index f3122547..f3122547 100644
--- a/src/qcam/assets/feathericons/frown.svg
+++ b/src/apps/qcam/assets/feathericons/frown.svg
diff --git a/src/qcam/assets/feathericons/gift.svg b/src/apps/qcam/assets/feathericons/gift.svg
index d2c14bd6..d2c14bd6 100644
--- a/src/qcam/assets/feathericons/gift.svg
+++ b/src/apps/qcam/assets/feathericons/gift.svg
diff --git a/src/qcam/assets/feathericons/git-branch.svg b/src/apps/qcam/assets/feathericons/git-branch.svg
index 44003726..44003726 100644
--- a/src/qcam/assets/feathericons/git-branch.svg
+++ b/src/apps/qcam/assets/feathericons/git-branch.svg
diff --git a/src/qcam/assets/feathericons/git-commit.svg b/src/apps/qcam/assets/feathericons/git-commit.svg
index e959d725..e959d725 100644
--- a/src/qcam/assets/feathericons/git-commit.svg
+++ b/src/apps/qcam/assets/feathericons/git-commit.svg
diff --git a/src/qcam/assets/feathericons/git-merge.svg b/src/apps/qcam/assets/feathericons/git-merge.svg
index c65fffdd..c65fffdd 100644
--- a/src/qcam/assets/feathericons/git-merge.svg
+++ b/src/apps/qcam/assets/feathericons/git-merge.svg
diff --git a/src/qcam/assets/feathericons/git-pull-request.svg b/src/apps/qcam/assets/feathericons/git-pull-request.svg
index fc80bdfd..fc80bdfd 100644
--- a/src/qcam/assets/feathericons/git-pull-request.svg
+++ b/src/apps/qcam/assets/feathericons/git-pull-request.svg
diff --git a/src/qcam/assets/feathericons/github.svg b/src/apps/qcam/assets/feathericons/github.svg
index ff0af481..ff0af481 100644
--- a/src/qcam/assets/feathericons/github.svg
+++ b/src/apps/qcam/assets/feathericons/github.svg
diff --git a/src/qcam/assets/feathericons/gitlab.svg b/src/apps/qcam/assets/feathericons/gitlab.svg
index 85d54a1e..85d54a1e 100644
--- a/src/qcam/assets/feathericons/gitlab.svg
+++ b/src/apps/qcam/assets/feathericons/gitlab.svg
diff --git a/src/qcam/assets/feathericons/globe.svg b/src/apps/qcam/assets/feathericons/globe.svg
index 0a0586d3..0a0586d3 100644
--- a/src/qcam/assets/feathericons/globe.svg
+++ b/src/apps/qcam/assets/feathericons/globe.svg
diff --git a/src/qcam/assets/feathericons/grid.svg b/src/apps/qcam/assets/feathericons/grid.svg
index 8ef2e9d8..8ef2e9d8 100644
--- a/src/qcam/assets/feathericons/grid.svg
+++ b/src/apps/qcam/assets/feathericons/grid.svg
diff --git a/src/qcam/assets/feathericons/hard-drive.svg b/src/apps/qcam/assets/feathericons/hard-drive.svg
index 8e90fa1b..8e90fa1b 100644
--- a/src/qcam/assets/feathericons/hard-drive.svg
+++ b/src/apps/qcam/assets/feathericons/hard-drive.svg
diff --git a/src/qcam/assets/feathericons/hash.svg b/src/apps/qcam/assets/feathericons/hash.svg
index c9c8d41f..c9c8d41f 100644
--- a/src/qcam/assets/feathericons/hash.svg
+++ b/src/apps/qcam/assets/feathericons/hash.svg
diff --git a/src/qcam/assets/feathericons/headphones.svg b/src/apps/qcam/assets/feathericons/headphones.svg
index fd8915b4..fd8915b4 100644
--- a/src/qcam/assets/feathericons/headphones.svg
+++ b/src/apps/qcam/assets/feathericons/headphones.svg
diff --git a/src/qcam/assets/feathericons/heart.svg b/src/apps/qcam/assets/feathericons/heart.svg
index a083b7e2..a083b7e2 100644
--- a/src/qcam/assets/feathericons/heart.svg
+++ b/src/apps/qcam/assets/feathericons/heart.svg
diff --git a/src/qcam/assets/feathericons/help-circle.svg b/src/apps/qcam/assets/feathericons/help-circle.svg
index 51fddd80..51fddd80 100644
--- a/src/qcam/assets/feathericons/help-circle.svg
+++ b/src/apps/qcam/assets/feathericons/help-circle.svg
diff --git a/src/qcam/assets/feathericons/hexagon.svg b/src/apps/qcam/assets/feathericons/hexagon.svg
index eae7f255..eae7f255 100644
--- a/src/qcam/assets/feathericons/hexagon.svg
+++ b/src/apps/qcam/assets/feathericons/hexagon.svg
diff --git a/src/qcam/assets/feathericons/home.svg b/src/apps/qcam/assets/feathericons/home.svg
index 7bb31b23..7bb31b23 100644
--- a/src/qcam/assets/feathericons/home.svg
+++ b/src/apps/qcam/assets/feathericons/home.svg
diff --git a/src/qcam/assets/feathericons/image.svg b/src/apps/qcam/assets/feathericons/image.svg
index a7d84b98..a7d84b98 100644
--- a/src/qcam/assets/feathericons/image.svg
+++ b/src/apps/qcam/assets/feathericons/image.svg
diff --git a/src/qcam/assets/feathericons/inbox.svg b/src/apps/qcam/assets/feathericons/inbox.svg
index 03a13b4e..03a13b4e 100644
--- a/src/qcam/assets/feathericons/inbox.svg
+++ b/src/apps/qcam/assets/feathericons/inbox.svg
diff --git a/src/qcam/assets/feathericons/info.svg b/src/apps/qcam/assets/feathericons/info.svg
index a09fa5f1..a09fa5f1 100644
--- a/src/qcam/assets/feathericons/info.svg
+++ b/src/apps/qcam/assets/feathericons/info.svg
diff --git a/src/qcam/assets/feathericons/instagram.svg b/src/apps/qcam/assets/feathericons/instagram.svg
index 9fdb8e35..9fdb8e35 100644
--- a/src/qcam/assets/feathericons/instagram.svg
+++ b/src/apps/qcam/assets/feathericons/instagram.svg
diff --git a/src/qcam/assets/feathericons/italic.svg b/src/apps/qcam/assets/feathericons/italic.svg
index a123d371..a123d371 100644
--- a/src/qcam/assets/feathericons/italic.svg
+++ b/src/apps/qcam/assets/feathericons/italic.svg
diff --git a/src/qcam/assets/feathericons/key.svg b/src/apps/qcam/assets/feathericons/key.svg
index e778e74e..e778e74e 100644
--- a/src/qcam/assets/feathericons/key.svg
+++ b/src/apps/qcam/assets/feathericons/key.svg
diff --git a/src/qcam/assets/feathericons/layers.svg b/src/apps/qcam/assets/feathericons/layers.svg
index ea788c22..ea788c22 100644
--- a/src/qcam/assets/feathericons/layers.svg
+++ b/src/apps/qcam/assets/feathericons/layers.svg
diff --git a/src/qcam/assets/feathericons/layout.svg b/src/apps/qcam/assets/feathericons/layout.svg
index 28743d92..28743d92 100644
--- a/src/qcam/assets/feathericons/layout.svg
+++ b/src/apps/qcam/assets/feathericons/layout.svg
diff --git a/src/qcam/assets/feathericons/life-buoy.svg b/src/apps/qcam/assets/feathericons/life-buoy.svg
index 54c2bd7d..54c2bd7d 100644
--- a/src/qcam/assets/feathericons/life-buoy.svg
+++ b/src/apps/qcam/assets/feathericons/life-buoy.svg
diff --git a/src/qcam/assets/feathericons/link-2.svg b/src/apps/qcam/assets/feathericons/link-2.svg
index 8cc7f6dd..8cc7f6dd 100644
--- a/src/qcam/assets/feathericons/link-2.svg
+++ b/src/apps/qcam/assets/feathericons/link-2.svg
diff --git a/src/qcam/assets/feathericons/link.svg b/src/apps/qcam/assets/feathericons/link.svg
index c89dd41c..c89dd41c 100644
--- a/src/qcam/assets/feathericons/link.svg
+++ b/src/apps/qcam/assets/feathericons/link.svg
diff --git a/src/qcam/assets/feathericons/linkedin.svg b/src/apps/qcam/assets/feathericons/linkedin.svg
index 39531094..39531094 100644
--- a/src/qcam/assets/feathericons/linkedin.svg
+++ b/src/apps/qcam/assets/feathericons/linkedin.svg
diff --git a/src/qcam/assets/feathericons/list.svg b/src/apps/qcam/assets/feathericons/list.svg
index 5ce38eaa..5ce38eaa 100644
--- a/src/qcam/assets/feathericons/list.svg
+++ b/src/apps/qcam/assets/feathericons/list.svg
diff --git a/src/qcam/assets/feathericons/loader.svg b/src/apps/qcam/assets/feathericons/loader.svg
index e1a70c12..e1a70c12 100644
--- a/src/qcam/assets/feathericons/loader.svg
+++ b/src/apps/qcam/assets/feathericons/loader.svg
diff --git a/src/qcam/assets/feathericons/lock.svg b/src/apps/qcam/assets/feathericons/lock.svg
index de09d9db..de09d9db 100644
--- a/src/qcam/assets/feathericons/lock.svg
+++ b/src/apps/qcam/assets/feathericons/lock.svg
diff --git a/src/qcam/assets/feathericons/log-in.svg b/src/apps/qcam/assets/feathericons/log-in.svg
index ba0da59a..ba0da59a 100644
--- a/src/qcam/assets/feathericons/log-in.svg
+++ b/src/apps/qcam/assets/feathericons/log-in.svg
diff --git a/src/qcam/assets/feathericons/log-out.svg b/src/apps/qcam/assets/feathericons/log-out.svg
index c9002c90..c9002c90 100644
--- a/src/qcam/assets/feathericons/log-out.svg
+++ b/src/apps/qcam/assets/feathericons/log-out.svg
diff --git a/src/qcam/assets/feathericons/mail.svg b/src/apps/qcam/assets/feathericons/mail.svg
index 2af169e8..2af169e8 100644
--- a/src/qcam/assets/feathericons/mail.svg
+++ b/src/apps/qcam/assets/feathericons/mail.svg
diff --git a/src/qcam/assets/feathericons/map-pin.svg b/src/apps/qcam/assets/feathericons/map-pin.svg
index d5548e92..d5548e92 100644
--- a/src/qcam/assets/feathericons/map-pin.svg
+++ b/src/apps/qcam/assets/feathericons/map-pin.svg
diff --git a/src/qcam/assets/feathericons/map.svg b/src/apps/qcam/assets/feathericons/map.svg
index ecebd7bf..ecebd7bf 100644
--- a/src/qcam/assets/feathericons/map.svg
+++ b/src/apps/qcam/assets/feathericons/map.svg
diff --git a/src/qcam/assets/feathericons/maximize-2.svg b/src/apps/qcam/assets/feathericons/maximize-2.svg
index e41fc0b7..e41fc0b7 100644
--- a/src/qcam/assets/feathericons/maximize-2.svg
+++ b/src/apps/qcam/assets/feathericons/maximize-2.svg
diff --git a/src/qcam/assets/feathericons/maximize.svg b/src/apps/qcam/assets/feathericons/maximize.svg
index fc305189..fc305189 100644
--- a/src/qcam/assets/feathericons/maximize.svg
+++ b/src/apps/qcam/assets/feathericons/maximize.svg
diff --git a/src/qcam/assets/feathericons/meh.svg b/src/apps/qcam/assets/feathericons/meh.svg
index 6f57fff2..6f57fff2 100644
--- a/src/qcam/assets/feathericons/meh.svg
+++ b/src/apps/qcam/assets/feathericons/meh.svg
diff --git a/src/qcam/assets/feathericons/menu.svg b/src/apps/qcam/assets/feathericons/menu.svg
index e8a84a95..e8a84a95 100644
--- a/src/qcam/assets/feathericons/menu.svg
+++ b/src/apps/qcam/assets/feathericons/menu.svg
diff --git a/src/qcam/assets/feathericons/message-circle.svg b/src/apps/qcam/assets/feathericons/message-circle.svg
index 4b21b32b..4b21b32b 100644
--- a/src/qcam/assets/feathericons/message-circle.svg
+++ b/src/apps/qcam/assets/feathericons/message-circle.svg
diff --git a/src/qcam/assets/feathericons/message-square.svg b/src/apps/qcam/assets/feathericons/message-square.svg
index 6a2e4e59..6a2e4e59 100644
--- a/src/qcam/assets/feathericons/message-square.svg
+++ b/src/apps/qcam/assets/feathericons/message-square.svg
diff --git a/src/qcam/assets/feathericons/mic-off.svg b/src/apps/qcam/assets/feathericons/mic-off.svg
index 0786219c..0786219c 100644
--- a/src/qcam/assets/feathericons/mic-off.svg
+++ b/src/apps/qcam/assets/feathericons/mic-off.svg
diff --git a/src/qcam/assets/feathericons/mic.svg b/src/apps/qcam/assets/feathericons/mic.svg
index dc5f780c..dc5f780c 100644
--- a/src/qcam/assets/feathericons/mic.svg
+++ b/src/apps/qcam/assets/feathericons/mic.svg
diff --git a/src/qcam/assets/feathericons/minimize-2.svg b/src/apps/qcam/assets/feathericons/minimize-2.svg
index a720fa6c..a720fa6c 100644
--- a/src/qcam/assets/feathericons/minimize-2.svg
+++ b/src/apps/qcam/assets/feathericons/minimize-2.svg
diff --git a/src/qcam/assets/feathericons/minimize.svg b/src/apps/qcam/assets/feathericons/minimize.svg
index 46d61196..46d61196 100644
--- a/src/qcam/assets/feathericons/minimize.svg
+++ b/src/apps/qcam/assets/feathericons/minimize.svg
diff --git a/src/qcam/assets/feathericons/minus-circle.svg b/src/apps/qcam/assets/feathericons/minus-circle.svg
index 80c0de1e..80c0de1e 100644
--- a/src/qcam/assets/feathericons/minus-circle.svg
+++ b/src/apps/qcam/assets/feathericons/minus-circle.svg
diff --git a/src/qcam/assets/feathericons/minus-square.svg b/src/apps/qcam/assets/feathericons/minus-square.svg
index 4862832a..4862832a 100644
--- a/src/qcam/assets/feathericons/minus-square.svg
+++ b/src/apps/qcam/assets/feathericons/minus-square.svg
diff --git a/src/qcam/assets/feathericons/minus.svg b/src/apps/qcam/assets/feathericons/minus.svg
index 93cc7340..93cc7340 100644
--- a/src/qcam/assets/feathericons/minus.svg
+++ b/src/apps/qcam/assets/feathericons/minus.svg
diff --git a/src/qcam/assets/feathericons/monitor.svg b/src/apps/qcam/assets/feathericons/monitor.svg
index 6c3556db..6c3556db 100644
--- a/src/qcam/assets/feathericons/monitor.svg
+++ b/src/apps/qcam/assets/feathericons/monitor.svg
diff --git a/src/qcam/assets/feathericons/moon.svg b/src/apps/qcam/assets/feathericons/moon.svg
index dbf7c6cf..dbf7c6cf 100644
--- a/src/qcam/assets/feathericons/moon.svg
+++ b/src/apps/qcam/assets/feathericons/moon.svg
diff --git a/src/qcam/assets/feathericons/more-horizontal.svg b/src/apps/qcam/assets/feathericons/more-horizontal.svg
index dc6a8556..dc6a8556 100644
--- a/src/qcam/assets/feathericons/more-horizontal.svg
+++ b/src/apps/qcam/assets/feathericons/more-horizontal.svg
diff --git a/src/qcam/assets/feathericons/more-vertical.svg b/src/apps/qcam/assets/feathericons/more-vertical.svg
index cba6958f..cba6958f 100644
--- a/src/qcam/assets/feathericons/more-vertical.svg
+++ b/src/apps/qcam/assets/feathericons/more-vertical.svg
diff --git a/src/qcam/assets/feathericons/mouse-pointer.svg b/src/apps/qcam/assets/feathericons/mouse-pointer.svg
index f5af5591..f5af5591 100644
--- a/src/qcam/assets/feathericons/mouse-pointer.svg
+++ b/src/apps/qcam/assets/feathericons/mouse-pointer.svg
diff --git a/src/qcam/assets/feathericons/move.svg b/src/apps/qcam/assets/feathericons/move.svg
index 4e251b56..4e251b56 100644
--- a/src/qcam/assets/feathericons/move.svg
+++ b/src/apps/qcam/assets/feathericons/move.svg
diff --git a/src/qcam/assets/feathericons/music.svg b/src/apps/qcam/assets/feathericons/music.svg
index 7bee2f7e..7bee2f7e 100644
--- a/src/qcam/assets/feathericons/music.svg
+++ b/src/apps/qcam/assets/feathericons/music.svg
diff --git a/src/qcam/assets/feathericons/navigation-2.svg b/src/apps/qcam/assets/feathericons/navigation-2.svg
index ae31db96..ae31db96 100644
--- a/src/qcam/assets/feathericons/navigation-2.svg
+++ b/src/apps/qcam/assets/feathericons/navigation-2.svg
diff --git a/src/qcam/assets/feathericons/navigation.svg b/src/apps/qcam/assets/feathericons/navigation.svg
index f600a414..f600a414 100644
--- a/src/qcam/assets/feathericons/navigation.svg
+++ b/src/apps/qcam/assets/feathericons/navigation.svg
diff --git a/src/qcam/assets/feathericons/octagon.svg b/src/apps/qcam/assets/feathericons/octagon.svg
index 124c5483..124c5483 100644
--- a/src/qcam/assets/feathericons/octagon.svg
+++ b/src/apps/qcam/assets/feathericons/octagon.svg
diff --git a/src/qcam/assets/feathericons/package.svg b/src/apps/qcam/assets/feathericons/package.svg
index f1e09eec..f1e09eec 100644
--- a/src/qcam/assets/feathericons/package.svg
+++ b/src/apps/qcam/assets/feathericons/package.svg
diff --git a/src/qcam/assets/feathericons/paperclip.svg b/src/apps/qcam/assets/feathericons/paperclip.svg
index b1f69b7a..b1f69b7a 100644
--- a/src/qcam/assets/feathericons/paperclip.svg
+++ b/src/apps/qcam/assets/feathericons/paperclip.svg
diff --git a/src/qcam/assets/feathericons/pause-circle.svg b/src/apps/qcam/assets/feathericons/pause-circle.svg
index f6b1a8df..f6b1a8df 100644
--- a/src/qcam/assets/feathericons/pause-circle.svg
+++ b/src/apps/qcam/assets/feathericons/pause-circle.svg
diff --git a/src/qcam/assets/feathericons/pause.svg b/src/apps/qcam/assets/feathericons/pause.svg
index 4e78038d..4e78038d 100644
--- a/src/qcam/assets/feathericons/pause.svg
+++ b/src/apps/qcam/assets/feathericons/pause.svg
diff --git a/src/qcam/assets/feathericons/pen-tool.svg b/src/apps/qcam/assets/feathericons/pen-tool.svg
index 0d26fa1e..0d26fa1e 100644
--- a/src/qcam/assets/feathericons/pen-tool.svg
+++ b/src/apps/qcam/assets/feathericons/pen-tool.svg
diff --git a/src/qcam/assets/feathericons/percent.svg b/src/apps/qcam/assets/feathericons/percent.svg
index 2cb9719d..2cb9719d 100644
--- a/src/qcam/assets/feathericons/percent.svg
+++ b/src/apps/qcam/assets/feathericons/percent.svg
diff --git a/src/qcam/assets/feathericons/phone-call.svg b/src/apps/qcam/assets/feathericons/phone-call.svg
index 8b866602..8b866602 100644
--- a/src/qcam/assets/feathericons/phone-call.svg
+++ b/src/apps/qcam/assets/feathericons/phone-call.svg
diff --git a/src/qcam/assets/feathericons/phone-forwarded.svg b/src/apps/qcam/assets/feathericons/phone-forwarded.svg
index aa21befc..aa21befc 100644
--- a/src/qcam/assets/feathericons/phone-forwarded.svg
+++ b/src/apps/qcam/assets/feathericons/phone-forwarded.svg
diff --git a/src/qcam/assets/feathericons/phone-incoming.svg b/src/apps/qcam/assets/feathericons/phone-incoming.svg
index b2d523a8..b2d523a8 100644
--- a/src/qcam/assets/feathericons/phone-incoming.svg
+++ b/src/apps/qcam/assets/feathericons/phone-incoming.svg
diff --git a/src/qcam/assets/feathericons/phone-missed.svg b/src/apps/qcam/assets/feathericons/phone-missed.svg
index 4950f09f..4950f09f 100644
--- a/src/qcam/assets/feathericons/phone-missed.svg
+++ b/src/apps/qcam/assets/feathericons/phone-missed.svg
diff --git a/src/qcam/assets/feathericons/phone-off.svg b/src/apps/qcam/assets/feathericons/phone-off.svg
index 4d00fb3d..4d00fb3d 100644
--- a/src/qcam/assets/feathericons/phone-off.svg
+++ b/src/apps/qcam/assets/feathericons/phone-off.svg
diff --git a/src/qcam/assets/feathericons/phone-outgoing.svg b/src/apps/qcam/assets/feathericons/phone-outgoing.svg
index fea27a37..fea27a37 100644
--- a/src/qcam/assets/feathericons/phone-outgoing.svg
+++ b/src/apps/qcam/assets/feathericons/phone-outgoing.svg
diff --git a/src/qcam/assets/feathericons/phone.svg b/src/apps/qcam/assets/feathericons/phone.svg
index 2a35154a..2a35154a 100644
--- a/src/qcam/assets/feathericons/phone.svg
+++ b/src/apps/qcam/assets/feathericons/phone.svg
diff --git a/src/qcam/assets/feathericons/pie-chart.svg b/src/apps/qcam/assets/feathericons/pie-chart.svg
index b5bbe67c..b5bbe67c 100644
--- a/src/qcam/assets/feathericons/pie-chart.svg
+++ b/src/apps/qcam/assets/feathericons/pie-chart.svg
diff --git a/src/qcam/assets/feathericons/play-circle.svg b/src/apps/qcam/assets/feathericons/play-circle.svg
index 8766dc7b..8766dc7b 100644
--- a/src/qcam/assets/feathericons/play-circle.svg
+++ b/src/apps/qcam/assets/feathericons/play-circle.svg
diff --git a/src/qcam/assets/feathericons/play.svg b/src/apps/qcam/assets/feathericons/play.svg
index fd76e30d..fd76e30d 100644
--- a/src/qcam/assets/feathericons/play.svg
+++ b/src/apps/qcam/assets/feathericons/play.svg
diff --git a/src/qcam/assets/feathericons/plus-circle.svg b/src/apps/qcam/assets/feathericons/plus-circle.svg
index 4291ff05..4291ff05 100644
--- a/src/qcam/assets/feathericons/plus-circle.svg
+++ b/src/apps/qcam/assets/feathericons/plus-circle.svg
diff --git a/src/qcam/assets/feathericons/plus-square.svg b/src/apps/qcam/assets/feathericons/plus-square.svg
index c380e24b..c380e24b 100644
--- a/src/qcam/assets/feathericons/plus-square.svg
+++ b/src/apps/qcam/assets/feathericons/plus-square.svg
diff --git a/src/qcam/assets/feathericons/plus.svg b/src/apps/qcam/assets/feathericons/plus.svg
index 703c5b7b..703c5b7b 100644
--- a/src/qcam/assets/feathericons/plus.svg
+++ b/src/apps/qcam/assets/feathericons/plus.svg
diff --git a/src/qcam/assets/feathericons/pocket.svg b/src/apps/qcam/assets/feathericons/pocket.svg
index a3b25619..a3b25619 100644
--- a/src/qcam/assets/feathericons/pocket.svg
+++ b/src/apps/qcam/assets/feathericons/pocket.svg
diff --git a/src/qcam/assets/feathericons/power.svg b/src/apps/qcam/assets/feathericons/power.svg
index 598308fc..598308fc 100644
--- a/src/qcam/assets/feathericons/power.svg
+++ b/src/apps/qcam/assets/feathericons/power.svg
diff --git a/src/qcam/assets/feathericons/printer.svg b/src/apps/qcam/assets/feathericons/printer.svg
index 8a9a7ace..8a9a7ace 100644
--- a/src/qcam/assets/feathericons/printer.svg
+++ b/src/apps/qcam/assets/feathericons/printer.svg
diff --git a/src/qcam/assets/feathericons/radio.svg b/src/apps/qcam/assets/feathericons/radio.svg
index 5abfcd13..5abfcd13 100644
--- a/src/qcam/assets/feathericons/radio.svg
+++ b/src/apps/qcam/assets/feathericons/radio.svg
diff --git a/src/qcam/assets/feathericons/refresh-ccw.svg b/src/apps/qcam/assets/feathericons/refresh-ccw.svg
index 10cff0ec..10cff0ec 100644
--- a/src/qcam/assets/feathericons/refresh-ccw.svg
+++ b/src/apps/qcam/assets/feathericons/refresh-ccw.svg
diff --git a/src/qcam/assets/feathericons/refresh-cw.svg b/src/apps/qcam/assets/feathericons/refresh-cw.svg
index 06c358dd..06c358dd 100644
--- a/src/qcam/assets/feathericons/refresh-cw.svg
+++ b/src/apps/qcam/assets/feathericons/refresh-cw.svg
diff --git a/src/qcam/assets/feathericons/repeat.svg b/src/apps/qcam/assets/feathericons/repeat.svg
index c7657b08..c7657b08 100644
--- a/src/qcam/assets/feathericons/repeat.svg
+++ b/src/apps/qcam/assets/feathericons/repeat.svg
diff --git a/src/qcam/assets/feathericons/rewind.svg b/src/apps/qcam/assets/feathericons/rewind.svg
index 7b0fa3d5..7b0fa3d5 100644
--- a/src/qcam/assets/feathericons/rewind.svg
+++ b/src/apps/qcam/assets/feathericons/rewind.svg
diff --git a/src/qcam/assets/feathericons/rotate-ccw.svg b/src/apps/qcam/assets/feathericons/rotate-ccw.svg
index ade5dc42..ade5dc42 100644
--- a/src/qcam/assets/feathericons/rotate-ccw.svg
+++ b/src/apps/qcam/assets/feathericons/rotate-ccw.svg
diff --git a/src/qcam/assets/feathericons/rotate-cw.svg b/src/apps/qcam/assets/feathericons/rotate-cw.svg
index 83dca351..83dca351 100644
--- a/src/qcam/assets/feathericons/rotate-cw.svg
+++ b/src/apps/qcam/assets/feathericons/rotate-cw.svg
diff --git a/src/qcam/assets/feathericons/rss.svg b/src/apps/qcam/assets/feathericons/rss.svg
index c9a13684..c9a13684 100644
--- a/src/qcam/assets/feathericons/rss.svg
+++ b/src/apps/qcam/assets/feathericons/rss.svg
diff --git a/src/qcam/assets/feathericons/save.svg b/src/apps/qcam/assets/feathericons/save.svg
index 46c72990..46c72990 100644
--- a/src/qcam/assets/feathericons/save.svg
+++ b/src/apps/qcam/assets/feathericons/save.svg
diff --git a/src/qcam/assets/feathericons/scissors.svg b/src/apps/qcam/assets/feathericons/scissors.svg
index fd0647ff..fd0647ff 100644
--- a/src/qcam/assets/feathericons/scissors.svg
+++ b/src/apps/qcam/assets/feathericons/scissors.svg
diff --git a/src/qcam/assets/feathericons/search.svg b/src/apps/qcam/assets/feathericons/search.svg
index 8710306d..8710306d 100644
--- a/src/qcam/assets/feathericons/search.svg
+++ b/src/apps/qcam/assets/feathericons/search.svg
diff --git a/src/qcam/assets/feathericons/send.svg b/src/apps/qcam/assets/feathericons/send.svg
index 42ef2a24..42ef2a24 100644
--- a/src/qcam/assets/feathericons/send.svg
+++ b/src/apps/qcam/assets/feathericons/send.svg
diff --git a/src/qcam/assets/feathericons/server.svg b/src/apps/qcam/assets/feathericons/server.svg
index 54ce094a..54ce094a 100644
--- a/src/qcam/assets/feathericons/server.svg
+++ b/src/apps/qcam/assets/feathericons/server.svg
diff --git a/src/qcam/assets/feathericons/settings.svg b/src/apps/qcam/assets/feathericons/settings.svg
index 19c27265..19c27265 100644
--- a/src/qcam/assets/feathericons/settings.svg
+++ b/src/apps/qcam/assets/feathericons/settings.svg
diff --git a/src/qcam/assets/feathericons/share-2.svg b/src/apps/qcam/assets/feathericons/share-2.svg
index 09b1c7bc..09b1c7bc 100644
--- a/src/qcam/assets/feathericons/share-2.svg
+++ b/src/apps/qcam/assets/feathericons/share-2.svg
diff --git a/src/qcam/assets/feathericons/share.svg b/src/apps/qcam/assets/feathericons/share.svg
index df38c14d..df38c14d 100644
--- a/src/qcam/assets/feathericons/share.svg
+++ b/src/apps/qcam/assets/feathericons/share.svg
diff --git a/src/qcam/assets/feathericons/shield-off.svg b/src/apps/qcam/assets/feathericons/shield-off.svg
index 18692ddd..18692ddd 100644
--- a/src/qcam/assets/feathericons/shield-off.svg
+++ b/src/apps/qcam/assets/feathericons/shield-off.svg
diff --git a/src/qcam/assets/feathericons/shield.svg b/src/apps/qcam/assets/feathericons/shield.svg
index c7c48413..c7c48413 100644
--- a/src/qcam/assets/feathericons/shield.svg
+++ b/src/apps/qcam/assets/feathericons/shield.svg
diff --git a/src/qcam/assets/feathericons/shopping-bag.svg b/src/apps/qcam/assets/feathericons/shopping-bag.svg
index eaa39e81..eaa39e81 100644
--- a/src/qcam/assets/feathericons/shopping-bag.svg
+++ b/src/apps/qcam/assets/feathericons/shopping-bag.svg
diff --git a/src/qcam/assets/feathericons/shopping-cart.svg b/src/apps/qcam/assets/feathericons/shopping-cart.svg
index 17a40bf4..17a40bf4 100644
--- a/src/qcam/assets/feathericons/shopping-cart.svg
+++ b/src/apps/qcam/assets/feathericons/shopping-cart.svg
diff --git a/src/qcam/assets/feathericons/shuffle.svg b/src/apps/qcam/assets/feathericons/shuffle.svg
index 8cfb5db5..8cfb5db5 100644
--- a/src/qcam/assets/feathericons/shuffle.svg
+++ b/src/apps/qcam/assets/feathericons/shuffle.svg
diff --git a/src/qcam/assets/feathericons/sidebar.svg b/src/apps/qcam/assets/feathericons/sidebar.svg
index 8ba817e6..8ba817e6 100644
--- a/src/qcam/assets/feathericons/sidebar.svg
+++ b/src/apps/qcam/assets/feathericons/sidebar.svg
diff --git a/src/qcam/assets/feathericons/skip-back.svg b/src/apps/qcam/assets/feathericons/skip-back.svg
index 88d024e2..88d024e2 100644
--- a/src/qcam/assets/feathericons/skip-back.svg
+++ b/src/apps/qcam/assets/feathericons/skip-back.svg
diff --git a/src/qcam/assets/feathericons/skip-forward.svg b/src/apps/qcam/assets/feathericons/skip-forward.svg
index f3fdac3a..f3fdac3a 100644
--- a/src/qcam/assets/feathericons/skip-forward.svg
+++ b/src/apps/qcam/assets/feathericons/skip-forward.svg
diff --git a/src/qcam/assets/feathericons/slack.svg b/src/apps/qcam/assets/feathericons/slack.svg
index 5d973466..5d973466 100644
--- a/src/qcam/assets/feathericons/slack.svg
+++ b/src/apps/qcam/assets/feathericons/slack.svg
diff --git a/src/qcam/assets/feathericons/slash.svg b/src/apps/qcam/assets/feathericons/slash.svg
index f4131b85..f4131b85 100644
--- a/src/qcam/assets/feathericons/slash.svg
+++ b/src/apps/qcam/assets/feathericons/slash.svg
diff --git a/src/qcam/assets/feathericons/sliders.svg b/src/apps/qcam/assets/feathericons/sliders.svg
index 19c93852..19c93852 100644
--- a/src/qcam/assets/feathericons/sliders.svg
+++ b/src/apps/qcam/assets/feathericons/sliders.svg
diff --git a/src/qcam/assets/feathericons/smartphone.svg b/src/apps/qcam/assets/feathericons/smartphone.svg
index 0171a95a..0171a95a 100644
--- a/src/qcam/assets/feathericons/smartphone.svg
+++ b/src/apps/qcam/assets/feathericons/smartphone.svg
diff --git a/src/qcam/assets/feathericons/smile.svg b/src/apps/qcam/assets/feathericons/smile.svg
index 24dc8a26..24dc8a26 100644
--- a/src/qcam/assets/feathericons/smile.svg
+++ b/src/apps/qcam/assets/feathericons/smile.svg
diff --git a/src/qcam/assets/feathericons/speaker.svg b/src/apps/qcam/assets/feathericons/speaker.svg
index 75d5ff9c..75d5ff9c 100644
--- a/src/qcam/assets/feathericons/speaker.svg
+++ b/src/apps/qcam/assets/feathericons/speaker.svg
diff --git a/src/qcam/assets/feathericons/square.svg b/src/apps/qcam/assets/feathericons/square.svg
index 6eabc77d..6eabc77d 100644
--- a/src/qcam/assets/feathericons/square.svg
+++ b/src/apps/qcam/assets/feathericons/square.svg
diff --git a/src/qcam/assets/feathericons/star.svg b/src/apps/qcam/assets/feathericons/star.svg
index bcdc31aa..bcdc31aa 100644
--- a/src/qcam/assets/feathericons/star.svg
+++ b/src/apps/qcam/assets/feathericons/star.svg
diff --git a/src/qcam/assets/feathericons/stop-circle.svg b/src/apps/qcam/assets/feathericons/stop-circle.svg
index c10d9d47..c10d9d47 100644
--- a/src/qcam/assets/feathericons/stop-circle.svg
+++ b/src/apps/qcam/assets/feathericons/stop-circle.svg
diff --git a/src/qcam/assets/feathericons/sun.svg b/src/apps/qcam/assets/feathericons/sun.svg
index 7f51b94d..7f51b94d 100644
--- a/src/qcam/assets/feathericons/sun.svg
+++ b/src/apps/qcam/assets/feathericons/sun.svg
diff --git a/src/qcam/assets/feathericons/sunrise.svg b/src/apps/qcam/assets/feathericons/sunrise.svg
index eff4b1e4..eff4b1e4 100644
--- a/src/qcam/assets/feathericons/sunrise.svg
+++ b/src/apps/qcam/assets/feathericons/sunrise.svg
diff --git a/src/qcam/assets/feathericons/sunset.svg b/src/apps/qcam/assets/feathericons/sunset.svg
index a5a22215..a5a22215 100644
--- a/src/qcam/assets/feathericons/sunset.svg
+++ b/src/apps/qcam/assets/feathericons/sunset.svg
diff --git a/src/qcam/assets/feathericons/tablet.svg b/src/apps/qcam/assets/feathericons/tablet.svg
index 9c80b40a..9c80b40a 100644
--- a/src/qcam/assets/feathericons/tablet.svg
+++ b/src/apps/qcam/assets/feathericons/tablet.svg
diff --git a/src/qcam/assets/feathericons/tag.svg b/src/apps/qcam/assets/feathericons/tag.svg
index 7219b15f..7219b15f 100644
--- a/src/qcam/assets/feathericons/tag.svg
+++ b/src/apps/qcam/assets/feathericons/tag.svg
diff --git a/src/qcam/assets/feathericons/target.svg b/src/apps/qcam/assets/feathericons/target.svg
index be84b17c..be84b17c 100644
--- a/src/qcam/assets/feathericons/target.svg
+++ b/src/apps/qcam/assets/feathericons/target.svg
diff --git a/src/qcam/assets/feathericons/terminal.svg b/src/apps/qcam/assets/feathericons/terminal.svg
index af459c04..af459c04 100644
--- a/src/qcam/assets/feathericons/terminal.svg
+++ b/src/apps/qcam/assets/feathericons/terminal.svg
diff --git a/src/qcam/assets/feathericons/thermometer.svg b/src/apps/qcam/assets/feathericons/thermometer.svg
index 33142ccc..33142ccc 100644
--- a/src/qcam/assets/feathericons/thermometer.svg
+++ b/src/apps/qcam/assets/feathericons/thermometer.svg
diff --git a/src/qcam/assets/feathericons/thumbs-down.svg b/src/apps/qcam/assets/feathericons/thumbs-down.svg
index 3e7bcd6d..3e7bcd6d 100644
--- a/src/qcam/assets/feathericons/thumbs-down.svg
+++ b/src/apps/qcam/assets/feathericons/thumbs-down.svg
diff --git a/src/qcam/assets/feathericons/thumbs-up.svg b/src/apps/qcam/assets/feathericons/thumbs-up.svg
index 226c44d8..226c44d8 100644
--- a/src/qcam/assets/feathericons/thumbs-up.svg
+++ b/src/apps/qcam/assets/feathericons/thumbs-up.svg
diff --git a/src/qcam/assets/feathericons/toggle-left.svg b/src/apps/qcam/assets/feathericons/toggle-left.svg
index 240be290..240be290 100644
--- a/src/qcam/assets/feathericons/toggle-left.svg
+++ b/src/apps/qcam/assets/feathericons/toggle-left.svg
diff --git a/src/qcam/assets/feathericons/toggle-right.svg b/src/apps/qcam/assets/feathericons/toggle-right.svg
index fc6e81c1..fc6e81c1 100644
--- a/src/qcam/assets/feathericons/toggle-right.svg
+++ b/src/apps/qcam/assets/feathericons/toggle-right.svg
diff --git a/src/qcam/assets/feathericons/tool.svg b/src/apps/qcam/assets/feathericons/tool.svg
index f3cbf3d9..f3cbf3d9 100644
--- a/src/qcam/assets/feathericons/tool.svg
+++ b/src/apps/qcam/assets/feathericons/tool.svg
diff --git a/src/qcam/assets/feathericons/trash-2.svg b/src/apps/qcam/assets/feathericons/trash-2.svg
index f24d55bf..f24d55bf 100644
--- a/src/qcam/assets/feathericons/trash-2.svg
+++ b/src/apps/qcam/assets/feathericons/trash-2.svg
diff --git a/src/qcam/assets/feathericons/trash.svg b/src/apps/qcam/assets/feathericons/trash.svg
index 55650bd4..55650bd4 100644
--- a/src/qcam/assets/feathericons/trash.svg
+++ b/src/apps/qcam/assets/feathericons/trash.svg
diff --git a/src/qcam/assets/feathericons/trello.svg b/src/apps/qcam/assets/feathericons/trello.svg
index b2f599b6..b2f599b6 100644
--- a/src/qcam/assets/feathericons/trello.svg
+++ b/src/apps/qcam/assets/feathericons/trello.svg
diff --git a/src/qcam/assets/feathericons/trending-down.svg b/src/apps/qcam/assets/feathericons/trending-down.svg
index a9d4cfa5..a9d4cfa5 100644
--- a/src/qcam/assets/feathericons/trending-down.svg
+++ b/src/apps/qcam/assets/feathericons/trending-down.svg
diff --git a/src/qcam/assets/feathericons/trending-up.svg b/src/apps/qcam/assets/feathericons/trending-up.svg
index 52026a4d..52026a4d 100644
--- a/src/qcam/assets/feathericons/trending-up.svg
+++ b/src/apps/qcam/assets/feathericons/trending-up.svg
diff --git a/src/qcam/assets/feathericons/triangle.svg b/src/apps/qcam/assets/feathericons/triangle.svg
index 274b6528..274b6528 100644
--- a/src/qcam/assets/feathericons/triangle.svg
+++ b/src/apps/qcam/assets/feathericons/triangle.svg
diff --git a/src/qcam/assets/feathericons/truck.svg b/src/apps/qcam/assets/feathericons/truck.svg
index 33898373..33898373 100644
--- a/src/qcam/assets/feathericons/truck.svg
+++ b/src/apps/qcam/assets/feathericons/truck.svg
diff --git a/src/qcam/assets/feathericons/tv.svg b/src/apps/qcam/assets/feathericons/tv.svg
index 955bbfff..955bbfff 100644
--- a/src/qcam/assets/feathericons/tv.svg
+++ b/src/apps/qcam/assets/feathericons/tv.svg
diff --git a/src/qcam/assets/feathericons/twitch.svg b/src/apps/qcam/assets/feathericons/twitch.svg
index 17062495..17062495 100644
--- a/src/qcam/assets/feathericons/twitch.svg
+++ b/src/apps/qcam/assets/feathericons/twitch.svg
diff --git a/src/qcam/assets/feathericons/twitter.svg b/src/apps/qcam/assets/feathericons/twitter.svg
index f8886eca..f8886eca 100644
--- a/src/qcam/assets/feathericons/twitter.svg
+++ b/src/apps/qcam/assets/feathericons/twitter.svg
diff --git a/src/qcam/assets/feathericons/type.svg b/src/apps/qcam/assets/feathericons/type.svg
index c6b2de33..c6b2de33 100644
--- a/src/qcam/assets/feathericons/type.svg
+++ b/src/apps/qcam/assets/feathericons/type.svg
diff --git a/src/qcam/assets/feathericons/umbrella.svg b/src/apps/qcam/assets/feathericons/umbrella.svg
index dc77c0cb..dc77c0cb 100644
--- a/src/qcam/assets/feathericons/umbrella.svg
+++ b/src/apps/qcam/assets/feathericons/umbrella.svg
diff --git a/src/qcam/assets/feathericons/underline.svg b/src/apps/qcam/assets/feathericons/underline.svg
index 044945d4..044945d4 100644
--- a/src/qcam/assets/feathericons/underline.svg
+++ b/src/apps/qcam/assets/feathericons/underline.svg
diff --git a/src/qcam/assets/feathericons/unlock.svg b/src/apps/qcam/assets/feathericons/unlock.svg
index 01dc3597..01dc3597 100644
--- a/src/qcam/assets/feathericons/unlock.svg
+++ b/src/apps/qcam/assets/feathericons/unlock.svg
diff --git a/src/qcam/assets/feathericons/upload-cloud.svg b/src/apps/qcam/assets/feathericons/upload-cloud.svg
index a1db297c..a1db297c 100644
--- a/src/qcam/assets/feathericons/upload-cloud.svg
+++ b/src/apps/qcam/assets/feathericons/upload-cloud.svg
diff --git a/src/qcam/assets/feathericons/upload.svg b/src/apps/qcam/assets/feathericons/upload.svg
index 91eaff75..91eaff75 100644
--- a/src/qcam/assets/feathericons/upload.svg
+++ b/src/apps/qcam/assets/feathericons/upload.svg
diff --git a/src/qcam/assets/feathericons/user-check.svg b/src/apps/qcam/assets/feathericons/user-check.svg
index 42f91b29..42f91b29 100644
--- a/src/qcam/assets/feathericons/user-check.svg
+++ b/src/apps/qcam/assets/feathericons/user-check.svg
diff --git a/src/qcam/assets/feathericons/user-minus.svg b/src/apps/qcam/assets/feathericons/user-minus.svg
index 44b75f5a..44b75f5a 100644
--- a/src/qcam/assets/feathericons/user-minus.svg
+++ b/src/apps/qcam/assets/feathericons/user-minus.svg
diff --git a/src/qcam/assets/feathericons/user-plus.svg b/src/apps/qcam/assets/feathericons/user-plus.svg
index 21460f6e..21460f6e 100644
--- a/src/qcam/assets/feathericons/user-plus.svg
+++ b/src/apps/qcam/assets/feathericons/user-plus.svg
diff --git a/src/qcam/assets/feathericons/user-x.svg b/src/apps/qcam/assets/feathericons/user-x.svg
index 0c41a481..0c41a481 100644
--- a/src/qcam/assets/feathericons/user-x.svg
+++ b/src/apps/qcam/assets/feathericons/user-x.svg
diff --git a/src/qcam/assets/feathericons/user.svg b/src/apps/qcam/assets/feathericons/user.svg
index 7bb5f291..7bb5f291 100644
--- a/src/qcam/assets/feathericons/user.svg
+++ b/src/apps/qcam/assets/feathericons/user.svg
diff --git a/src/qcam/assets/feathericons/users.svg b/src/apps/qcam/assets/feathericons/users.svg
index aacf6b08..aacf6b08 100644
--- a/src/qcam/assets/feathericons/users.svg
+++ b/src/apps/qcam/assets/feathericons/users.svg
diff --git a/src/qcam/assets/feathericons/video-off.svg b/src/apps/qcam/assets/feathericons/video-off.svg
index 08ec6973..08ec6973 100644
--- a/src/qcam/assets/feathericons/video-off.svg
+++ b/src/apps/qcam/assets/feathericons/video-off.svg
diff --git a/src/qcam/assets/feathericons/video.svg b/src/apps/qcam/assets/feathericons/video.svg
index 8ff156aa..8ff156aa 100644
--- a/src/qcam/assets/feathericons/video.svg
+++ b/src/apps/qcam/assets/feathericons/video.svg
diff --git a/src/qcam/assets/feathericons/voicemail.svg b/src/apps/qcam/assets/feathericons/voicemail.svg
index 5d78a8e7..5d78a8e7 100644
--- a/src/qcam/assets/feathericons/voicemail.svg
+++ b/src/apps/qcam/assets/feathericons/voicemail.svg
diff --git a/src/qcam/assets/feathericons/volume-1.svg b/src/apps/qcam/assets/feathericons/volume-1.svg
index 150e875f..150e875f 100644
--- a/src/qcam/assets/feathericons/volume-1.svg
+++ b/src/apps/qcam/assets/feathericons/volume-1.svg
diff --git a/src/qcam/assets/feathericons/volume-2.svg b/src/apps/qcam/assets/feathericons/volume-2.svg
index 03d521c7..03d521c7 100644
--- a/src/qcam/assets/feathericons/volume-2.svg
+++ b/src/apps/qcam/assets/feathericons/volume-2.svg
diff --git a/src/qcam/assets/feathericons/volume-x.svg b/src/apps/qcam/assets/feathericons/volume-x.svg
index be442406..be442406 100644
--- a/src/qcam/assets/feathericons/volume-x.svg
+++ b/src/apps/qcam/assets/feathericons/volume-x.svg
diff --git a/src/qcam/assets/feathericons/volume.svg b/src/apps/qcam/assets/feathericons/volume.svg
index 53bfe15e..53bfe15e 100644
--- a/src/qcam/assets/feathericons/volume.svg
+++ b/src/apps/qcam/assets/feathericons/volume.svg
diff --git a/src/qcam/assets/feathericons/watch.svg b/src/apps/qcam/assets/feathericons/watch.svg
index a1099da3..a1099da3 100644
--- a/src/qcam/assets/feathericons/watch.svg
+++ b/src/apps/qcam/assets/feathericons/watch.svg
diff --git a/src/qcam/assets/feathericons/wifi-off.svg b/src/apps/qcam/assets/feathericons/wifi-off.svg
index 35eae43b..35eae43b 100644
--- a/src/qcam/assets/feathericons/wifi-off.svg
+++ b/src/apps/qcam/assets/feathericons/wifi-off.svg
diff --git a/src/qcam/assets/feathericons/wifi.svg b/src/apps/qcam/assets/feathericons/wifi.svg
index 748c285e..748c285e 100644
--- a/src/qcam/assets/feathericons/wifi.svg
+++ b/src/apps/qcam/assets/feathericons/wifi.svg
diff --git a/src/qcam/assets/feathericons/wind.svg b/src/apps/qcam/assets/feathericons/wind.svg
index 82b36468..82b36468 100644
--- a/src/qcam/assets/feathericons/wind.svg
+++ b/src/apps/qcam/assets/feathericons/wind.svg
diff --git a/src/qcam/assets/feathericons/x-circle.svg b/src/apps/qcam/assets/feathericons/x-circle.svg
index 94aad5e5..94aad5e5 100644
--- a/src/qcam/assets/feathericons/x-circle.svg
+++ b/src/apps/qcam/assets/feathericons/x-circle.svg
diff --git a/src/qcam/assets/feathericons/x-octagon.svg b/src/apps/qcam/assets/feathericons/x-octagon.svg
index 85431985..85431985 100644
--- a/src/qcam/assets/feathericons/x-octagon.svg
+++ b/src/apps/qcam/assets/feathericons/x-octagon.svg
diff --git a/src/qcam/assets/feathericons/x-square.svg b/src/apps/qcam/assets/feathericons/x-square.svg
index 7677c387..7677c387 100644
--- a/src/qcam/assets/feathericons/x-square.svg
+++ b/src/apps/qcam/assets/feathericons/x-square.svg
diff --git a/src/qcam/assets/feathericons/x.svg b/src/apps/qcam/assets/feathericons/x.svg
index 7d5875ca..7d5875ca 100644
--- a/src/qcam/assets/feathericons/x.svg
+++ b/src/apps/qcam/assets/feathericons/x.svg
diff --git a/src/qcam/assets/feathericons/youtube.svg b/src/apps/qcam/assets/feathericons/youtube.svg
index c4824385..c4824385 100644
--- a/src/qcam/assets/feathericons/youtube.svg
+++ b/src/apps/qcam/assets/feathericons/youtube.svg
diff --git a/src/qcam/assets/feathericons/zap-off.svg b/src/apps/qcam/assets/feathericons/zap-off.svg
index c636f8bb..c636f8bb 100644
--- a/src/qcam/assets/feathericons/zap-off.svg
+++ b/src/apps/qcam/assets/feathericons/zap-off.svg
diff --git a/src/qcam/assets/feathericons/zap.svg b/src/apps/qcam/assets/feathericons/zap.svg
index 8fdafa93..8fdafa93 100644
--- a/src/qcam/assets/feathericons/zap.svg
+++ b/src/apps/qcam/assets/feathericons/zap.svg
diff --git a/src/qcam/assets/feathericons/zoom-in.svg b/src/apps/qcam/assets/feathericons/zoom-in.svg
index da4572d2..da4572d2 100644
--- a/src/qcam/assets/feathericons/zoom-in.svg
+++ b/src/apps/qcam/assets/feathericons/zoom-in.svg
diff --git a/src/qcam/assets/feathericons/zoom-out.svg b/src/apps/qcam/assets/feathericons/zoom-out.svg
index fd678d72..fd678d72 100644
--- a/src/qcam/assets/feathericons/zoom-out.svg
+++ b/src/apps/qcam/assets/feathericons/zoom-out.svg
diff --git a/src/qcam/assets/shader/RGB.frag b/src/apps/qcam/assets/shader/RGB.frag
index 4c374ac9..4c374ac9 100644
--- a/src/qcam/assets/shader/RGB.frag
+++ b/src/apps/qcam/assets/shader/RGB.frag
diff --git a/src/qcam/assets/shader/YUV_2_planes.frag b/src/apps/qcam/assets/shader/YUV_2_planes.frag
index 254463c0..1d5d1206 100644
--- a/src/qcam/assets/shader/YUV_2_planes.frag
+++ b/src/apps/qcam/assets/shader/YUV_2_planes.frag
@@ -13,27 +13,30 @@ varying vec2 textureOut;
uniform sampler2D tex_y;
uniform sampler2D tex_u;
+const mat3 yuv2rgb_matrix = mat3(
+ YUV2RGB_MATRIX
+);
+
+const vec3 yuv2rgb_offset = vec3(
+ YUV2RGB_Y_OFFSET / 255.0, 128.0 / 255.0, 128.0 / 255.0
+);
+
void main(void)
{
vec3 yuv;
- vec3 rgb;
- mat3 yuv2rgb_bt601_mat = mat3(
- vec3(1.164, 1.164, 1.164),
- vec3(0.000, -0.392, 2.017),
- vec3(1.596, -0.813, 0.000)
- );
-
- yuv.x = texture2D(tex_y, textureOut).r - 0.063;
+
+ yuv.x = texture2D(tex_y, textureOut).r;
#if defined(YUV_PATTERN_UV)
- yuv.y = texture2D(tex_u, textureOut).r - 0.500;
- yuv.z = texture2D(tex_u, textureOut).a - 0.500;
+ yuv.y = texture2D(tex_u, textureOut).r;
+ yuv.z = texture2D(tex_u, textureOut).a;
#elif defined(YUV_PATTERN_VU)
- yuv.y = texture2D(tex_u, textureOut).a - 0.500;
- yuv.z = texture2D(tex_u, textureOut).r - 0.500;
+ yuv.y = texture2D(tex_u, textureOut).a;
+ yuv.z = texture2D(tex_u, textureOut).r;
#else
#error Invalid pattern
#endif
- rgb = yuv2rgb_bt601_mat * yuv;
+ vec3 rgb = yuv2rgb_matrix * (yuv - yuv2rgb_offset);
+
gl_FragColor = vec4(rgb, 1.0);
}
diff --git a/src/qcam/assets/shader/YUV_3_planes.frag b/src/apps/qcam/assets/shader/YUV_3_planes.frag
index 2be74b5d..8f788e90 100644
--- a/src/qcam/assets/shader/YUV_3_planes.frag
+++ b/src/apps/qcam/assets/shader/YUV_3_planes.frag
@@ -14,20 +14,23 @@ uniform sampler2D tex_y;
uniform sampler2D tex_u;
uniform sampler2D tex_v;
+const mat3 yuv2rgb_matrix = mat3(
+ YUV2RGB_MATRIX
+);
+
+const vec3 yuv2rgb_offset = vec3(
+ YUV2RGB_Y_OFFSET / 255.0, 128.0 / 255.0, 128.0 / 255.0
+);
+
void main(void)
{
vec3 yuv;
- vec3 rgb;
- mat3 yuv2rgb_bt601_mat = mat3(
- vec3(1.164, 1.164, 1.164),
- vec3(0.000, -0.392, 2.017),
- vec3(1.596, -0.813, 0.000)
- );
-
- yuv.x = texture2D(tex_y, textureOut).r - 0.063;
- yuv.y = texture2D(tex_u, textureOut).r - 0.500;
- yuv.z = texture2D(tex_v, textureOut).r - 0.500;
-
- rgb = yuv2rgb_bt601_mat * yuv;
+
+ yuv.x = texture2D(tex_y, textureOut).r;
+ yuv.y = texture2D(tex_u, textureOut).r;
+ yuv.z = texture2D(tex_v, textureOut).r;
+
+ vec3 rgb = yuv2rgb_matrix * (yuv - yuv2rgb_offset);
+
gl_FragColor = vec4(rgb, 1.0);
}
diff --git a/src/qcam/assets/shader/YUV_packed.frag b/src/apps/qcam/assets/shader/YUV_packed.frag
index d6efd4ce..b9ef9d41 100644
--- a/src/qcam/assets/shader/YUV_packed.frag
+++ b/src/apps/qcam/assets/shader/YUV_packed.frag
@@ -14,15 +14,16 @@ varying vec2 textureOut;
uniform sampler2D tex_y;
uniform vec2 tex_step;
+const mat3 yuv2rgb_matrix = mat3(
+ YUV2RGB_MATRIX
+);
+
+const vec3 yuv2rgb_offset = vec3(
+ YUV2RGB_Y_OFFSET / 255.0, 128.0 / 255.0, 128.0 / 255.0
+);
+
void main(void)
{
- mat3 yuv2rgb_bt601_mat = mat3(
- vec3(1.164, 1.164, 1.164),
- vec3(0.000, -0.392, 2.017),
- vec3(1.596, -0.813, 0.000)
- );
- vec3 yuv2rgb_bt601_offset = vec3(0.063, 0.500, 0.500);
-
/*
* The sampler won't interpolate the texture correctly along the X axis,
* as each RGBA pixel effectively stores two pixels. We thus need to
@@ -76,7 +77,7 @@ void main(void)
float y = mix(y_left, y_right, step(0.5, f_x));
- vec3 rgb = yuv2rgb_bt601_mat * (vec3(y, uv) - yuv2rgb_bt601_offset);
+ vec3 rgb = yuv2rgb_matrix * (vec3(y, uv) - yuv2rgb_offset);
gl_FragColor = vec4(rgb, 1.0);
}
diff --git a/src/qcam/assets/shader/bayer_1x_packed.frag b/src/apps/qcam/assets/shader/bayer_1x_packed.frag
index f53f5575..f53f5575 100644
--- a/src/qcam/assets/shader/bayer_1x_packed.frag
+++ b/src/apps/qcam/assets/shader/bayer_1x_packed.frag
diff --git a/src/qcam/assets/shader/bayer_8.frag b/src/apps/qcam/assets/shader/bayer_8.frag
index 4ece44ab..7e35ca88 100644
--- a/src/qcam/assets/shader/bayer_8.frag
+++ b/src/apps/qcam/assets/shader/bayer_8.frag
@@ -15,6 +15,9 @@ Copyright (C) 2021, Linaro
*/
//Pixel Shader
+#ifdef GL_ES
+precision mediump float;
+#endif
/** Monochrome RGBA or GL_LUMINANCE Bayer encoded texture.*/
uniform sampler2D tex_y;
diff --git a/src/qcam/assets/shader/bayer_8.vert b/src/apps/qcam/assets/shader/bayer_8.vert
index 3695a5e9..3695a5e9 100644
--- a/src/qcam/assets/shader/bayer_8.vert
+++ b/src/apps/qcam/assets/shader/bayer_8.vert
diff --git a/src/qcam/assets/shader/identity.vert b/src/apps/qcam/assets/shader/identity.vert
index 12c41377..12c41377 100644
--- a/src/qcam/assets/shader/identity.vert
+++ b/src/apps/qcam/assets/shader/identity.vert
diff --git a/src/qcam/assets/shader/shaders.qrc b/src/apps/qcam/assets/shader/shaders.qrc
index 96c709f9..96c709f9 100644
--- a/src/qcam/assets/shader/shaders.qrc
+++ b/src/apps/qcam/assets/shader/shaders.qrc
diff --git a/src/apps/qcam/cam_select_dialog.cpp b/src/apps/qcam/cam_select_dialog.cpp
new file mode 100644
index 00000000..c51f5974
--- /dev/null
+++ b/src/apps/qcam/cam_select_dialog.cpp
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2022, Utkarsh Tiwari <utkarsh02t@gmail.com>
+ *
+ * qcam - Camera Selection dialog
+ */
+
+#include "cam_select_dialog.h"
+
+#include <memory>
+
+#include <libcamera/camera.h>
+#include <libcamera/camera_manager.h>
+
+#include <QComboBox>
+#include <QDialogButtonBox>
+#include <QFormLayout>
+#include <QLabel>
+#include <QString>
+
+CameraSelectorDialog::CameraSelectorDialog(libcamera::CameraManager *cameraManager,
+ QWidget *parent)
+ : QDialog(parent), cm_(cameraManager)
+{
+ /* Use a QFormLayout for the dialog. */
+ QFormLayout *layout = new QFormLayout(this);
+
+ /* Setup the camera id combo-box. */
+ cameraIdComboBox_ = new QComboBox;
+ for (const auto &cam : cm_->cameras())
+ cameraIdComboBox_->addItem(QString::fromStdString(cam->id()));
+
+ /* Set camera information labels. */
+ cameraLocation_ = new QLabel;
+ cameraModel_ = new QLabel;
+
+ updateCameraInfo(cameraIdComboBox_->currentText());
+ connect(cameraIdComboBox_, &QComboBox::currentTextChanged,
+ this, &CameraSelectorDialog::updateCameraInfo);
+
+ /* Setup the QDialogButtonBox. */
+ QDialogButtonBox *buttonBox =
+ new QDialogButtonBox(QDialogButtonBox::Ok |
+ QDialogButtonBox::Cancel);
+
+ connect(buttonBox, &QDialogButtonBox::accepted,
+ this, &QDialog::accept);
+ connect(buttonBox, &QDialogButtonBox::rejected,
+ this, &QDialog::reject);
+
+ /* Set the layout. */
+ layout->addRow("Camera:", cameraIdComboBox_);
+ layout->addRow("Location:", cameraLocation_);
+ layout->addRow("Model:", cameraModel_);
+ layout->addWidget(buttonBox);
+}
+
+CameraSelectorDialog::~CameraSelectorDialog() = default;
+
+std::string CameraSelectorDialog::getCameraId()
+{
+ return cameraIdComboBox_->currentText().toStdString();
+}
+
+/* Hotplug / Unplug Support. */
+void CameraSelectorDialog::addCamera(QString cameraId)
+{
+ cameraIdComboBox_->addItem(cameraId);
+}
+
+void CameraSelectorDialog::removeCamera(QString cameraId)
+{
+ int cameraIndex = cameraIdComboBox_->findText(cameraId);
+ cameraIdComboBox_->removeItem(cameraIndex);
+}
+
+/* Camera Information */
+void CameraSelectorDialog::updateCameraInfo(QString cameraId)
+{
+ const std::shared_ptr<libcamera::Camera> &camera =
+ cm_->get(cameraId.toStdString());
+
+ if (!camera)
+ return;
+
+ const libcamera::ControlList &properties = camera->properties();
+
+ const auto &location = properties.get(libcamera::properties::Location);
+ if (location) {
+ switch (*location) {
+ case libcamera::properties::CameraLocationFront:
+ cameraLocation_->setText("Internal front camera");
+ break;
+ case libcamera::properties::CameraLocationBack:
+ cameraLocation_->setText("Internal back camera");
+ break;
+ case libcamera::properties::CameraLocationExternal:
+ cameraLocation_->setText("External camera");
+ break;
+ default:
+ cameraLocation_->setText("Unknown");
+ }
+ } else {
+ cameraLocation_->setText("Unknown");
+ }
+
+ const auto &model = properties.get(libcamera::properties::Model)
+ .value_or("Unknown");
+
+ cameraModel_->setText(QString::fromStdString(model));
+}
diff --git a/src/apps/qcam/cam_select_dialog.h b/src/apps/qcam/cam_select_dialog.h
new file mode 100644
index 00000000..4bec9ea9
--- /dev/null
+++ b/src/apps/qcam/cam_select_dialog.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2022, Utkarsh Tiwari <utkarsh02t@gmail.com>
+ *
+ * qcam - Camera Selection dialog
+ */
+
+#pragma once
+
+#include <string>
+
+#include <libcamera/camera.h>
+#include <libcamera/camera_manager.h>
+#include <libcamera/controls.h>
+#include <libcamera/property_ids.h>
+
+#include <QDialog>
+#include <QString>
+
+class QComboBox;
+class QLabel;
+
+class CameraSelectorDialog : public QDialog
+{
+ Q_OBJECT
+public:
+ CameraSelectorDialog(libcamera::CameraManager *cameraManager,
+ QWidget *parent);
+ ~CameraSelectorDialog();
+
+ std::string getCameraId();
+
+ /* Hotplug / Unplug Support. */
+ void addCamera(QString cameraId);
+ void removeCamera(QString cameraId);
+
+ /* Camera Information */
+ void updateCameraInfo(QString cameraId);
+
+private:
+ libcamera::CameraManager *cm_;
+
+ /* UI elements. */
+ QComboBox *cameraIdComboBox_;
+ QLabel *cameraLocation_;
+ QLabel *cameraModel_;
+};
diff --git a/src/qcam/format_converter.cpp b/src/apps/qcam/format_converter.cpp
index d4d3223b..32123493 100644
--- a/src/qcam/format_converter.cpp
+++ b/src/apps/qcam/format_converter.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * format_convert.cpp - qcam - Convert buffer to RGB
+ * Convert buffer to RGB
*/
#include "format_converter.h"
@@ -14,7 +14,7 @@
#include <libcamera/formats.h>
-#include "../cam/image.h"
+#include "../common/image.h"
#define RGBSHIFT 8
#ifndef MAX
@@ -93,6 +93,7 @@ int FormatConverter::configure(const libcamera::PixelFormat &format,
bpp_ = 3;
break;
case libcamera::formats::ARGB8888:
+ case libcamera::formats::XRGB8888:
formatFamily_ = RGB;
r_pos_ = 2;
g_pos_ = 1;
@@ -100,6 +101,7 @@ int FormatConverter::configure(const libcamera::PixelFormat &format,
bpp_ = 4;
break;
case libcamera::formats::RGBA8888:
+ case libcamera::formats::RGBX8888:
formatFamily_ = RGB;
r_pos_ = 3;
g_pos_ = 2;
@@ -107,6 +109,7 @@ int FormatConverter::configure(const libcamera::PixelFormat &format,
bpp_ = 4;
break;
case libcamera::formats::ABGR8888:
+ case libcamera::formats::XBGR8888:
formatFamily_ = RGB;
r_pos_ = 0;
g_pos_ = 1;
@@ -114,6 +117,7 @@ int FormatConverter::configure(const libcamera::PixelFormat &format,
bpp_ = 4;
break;
case libcamera::formats::BGRA8888:
+ case libcamera::formats::BGRX8888:
formatFamily_ = RGB;
r_pos_ = 1;
g_pos_ = 2;
diff --git a/src/qcam/format_converter.h b/src/apps/qcam/format_converter.h
index 37dbfae2..819a87a5 100644
--- a/src/qcam/format_converter.h
+++ b/src/apps/qcam/format_converter.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * format_convert.h - qcam - Convert buffer to RGB
+ * Convert buffer to RGB
*/
#pragma once
diff --git a/src/qcam/main.cpp b/src/apps/qcam/main.cpp
index d3f01a85..9846fba5 100644
--- a/src/qcam/main.cpp
+++ b/src/apps/qcam/main.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * main.cpp - qcam - The libcamera GUI test application
+ * qcam - The libcamera GUI test application
*/
#include <signal.h>
@@ -13,8 +13,9 @@
#include <libcamera/camera_manager.h>
-#include "../cam/options.h"
-#include "../cam/stream_options.h"
+#include "../common/options.h"
+#include "../common/stream_options.h"
+
#include "main_window.h"
#include "message_handler.h"
diff --git a/src/qcam/main_window.cpp b/src/apps/qcam/main_window.cpp
index dd0e51f5..d515beed 100644
--- a/src/qcam/main_window.cpp
+++ b/src/apps/qcam/main_window.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * main_window.cpp - qcam - Main application window
+ * qcam - Main application window
*/
#include "main_window.h"
@@ -14,12 +14,10 @@
#include <libcamera/camera_manager.h>
#include <libcamera/version.h>
-#include <QComboBox>
#include <QCoreApplication>
#include <QFileDialog>
#include <QImage>
#include <QImageWriter>
-#include <QInputDialog>
#include <QMutexLocker>
#include <QStandardPaths>
#include <QStringList>
@@ -28,8 +26,10 @@
#include <QToolButton>
#include <QtDebug>
-#include "../cam/image.h"
-#include "dng_writer.h"
+#include "../common/dng_writer.h"
+#include "../common/image.h"
+
+#include "cam_select_dialog.h"
#ifndef QT_NO_OPENGL
#include "viewfinder_gl.h"
#endif
@@ -119,14 +119,14 @@ MainWindow::MainWindow(CameraManager *cm, const OptionsParser::Options &options)
if (renderType == "qt") {
ViewFinderQt *viewfinder = new ViewFinderQt(this);
connect(viewfinder, &ViewFinderQt::renderComplete,
- this, &MainWindow::queueRequest);
+ this, &MainWindow::renderComplete);
viewfinder_ = viewfinder;
setCentralWidget(viewfinder);
#ifndef QT_NO_OPENGL
} else if (renderType == "gles") {
ViewFinderGL *viewfinder = new ViewFinderGL(this);
connect(viewfinder, &ViewFinderGL::renderComplete,
- this, &MainWindow::queueRequest);
+ this, &MainWindow::renderComplete);
viewfinder_ = viewfinder;
setCentralWidget(viewfinder);
#endif
@@ -143,6 +143,8 @@ MainWindow::MainWindow(CameraManager *cm, const OptionsParser::Options &options)
cm_->cameraAdded.connect(this, &MainWindow::addCamera);
cm_->cameraRemoved.connect(this, &MainWindow::removeCamera);
+ cameraSelectorDialog_ = new CameraSelectorDialog(cm_, this);
+
/* Open the camera and start capture. */
ret = openCamera();
if (ret < 0) {
@@ -188,18 +190,15 @@ int MainWindow::createToolbars()
action = toolbar_->addAction(QIcon::fromTheme("application-exit",
QIcon(":x-circle.svg")),
"Quit");
- action->setShortcut(Qt::CTRL | Qt::Key_Q);
+ action->setShortcut(QKeySequence::Quit);
connect(action, &QAction::triggered, this, &MainWindow::quit);
/* Camera selector. */
- cameraCombo_ = new QComboBox();
- connect(cameraCombo_, QOverload<int>::of(&QComboBox::activated),
+ cameraSelectButton_ = new QPushButton;
+ connect(cameraSelectButton_, &QPushButton::clicked,
this, &MainWindow::switchCamera);
- for (const std::shared_ptr<Camera> &cam : cm_->cameras())
- cameraCombo_->addItem(QString::fromStdString(cam->id()));
-
- toolbar_->addWidget(cameraCombo_);
+ toolbar_->addWidget(cameraSelectButton_);
toolbar_->addSeparator();
@@ -259,14 +258,18 @@ void MainWindow::updateTitle()
* Camera Selection
*/
-void MainWindow::switchCamera(int index)
+void MainWindow::switchCamera()
{
/* Get and acquire the new camera. */
- const auto &cameras = cm_->cameras();
- if (static_cast<unsigned int>(index) >= cameras.size())
+ std::string newCameraId = chooseCamera();
+
+ if (newCameraId.empty())
+ return;
+
+ if (camera_ && newCameraId == camera_->id())
return;
- const std::shared_ptr<Camera> &cam = cameras[index];
+ const std::shared_ptr<Camera> &cam = cm_->get(newCameraId);
if (cam->acquire()) {
qInfo() << "Failed to acquire camera" << cam->id().c_str();
@@ -281,32 +284,23 @@ void MainWindow::switchCamera(int index)
*/
startStopAction_->setChecked(false);
- camera_->release();
+ if (camera_)
+ camera_->release();
+
camera_ = cam;
startStopAction_->setChecked(true);
+
+ /* Display the current cameraId in the toolbar. */
+ cameraSelectButton_->setText(QString::fromStdString(newCameraId));
}
std::string MainWindow::chooseCamera()
{
- QStringList cameras;
- bool result;
-
- /* If only one camera is available, use it automatically. */
- if (cm_->cameras().size() == 1)
- return cm_->cameras()[0]->id();
-
- /* Present a dialog box to pick a camera. */
- for (const std::shared_ptr<Camera> &cam : cm_->cameras())
- cameras.append(QString::fromStdString(cam->id()));
-
- QString id = QInputDialog::getItem(this, "Select Camera",
- "Camera:", cameras, 0,
- false, &result);
- if (!result)
+ if (cameraSelectorDialog_->exec() != QDialog::Accepted)
return std::string();
- return id.toStdString();
+ return cameraSelectorDialog_->getCameraId();
}
int MainWindow::openCamera()
@@ -338,8 +332,8 @@ int MainWindow::openCamera()
return -EBUSY;
}
- /* Set the combo-box entry with the currently selected Camera. */
- cameraCombo_->setCurrentText(QString::fromStdString(cameraName));
+ /* Set the camera switch button with the currently selected Camera id. */
+ cameraSelectButton_->setText(QString::fromStdString(cameraName));
return 0;
}
@@ -368,7 +362,7 @@ void MainWindow::toggleCapture(bool start)
*/
int MainWindow::startCapture()
{
- StreamRoles roles = StreamKeyValueParser::roles(options_[OptStream]);
+ std::vector<StreamRole> roles = StreamKeyValueParser::roles(options_[OptStream]);
int ret;
/* Verify roles are supported. */
@@ -387,11 +381,8 @@ int MainWindow::startCapture()
}
break;
default:
- if (roles.size() != 1) {
- qWarning() << "Unsupported stream configuration";
- return -EINVAL;
- }
- break;
+ qWarning() << "Unsupported stream configuration";
+ return -EINVAL;
}
/* Configure the camera. */
@@ -446,9 +437,13 @@ int MainWindow::startCapture()
else
rawStream_ = nullptr;
- /* Configure the viewfinder. */
+ /*
+ * Configure the viewfinder. If no color space is reported, default to
+ * sYCC.
+ */
ret = viewfinder_->setFormat(vfConfig.pixelFormat,
QSize(vfConfig.size.width, vfConfig.size.height),
+ vfConfig.colorSpace.value_or(ColorSpace::Sycc),
vfConfig.stride);
if (ret < 0) {
qInfo() << "Failed to set viewfinder format";
@@ -521,7 +516,7 @@ int MainWindow::startCapture()
/* Queue all requests. */
for (std::unique_ptr<Request> &request : requests_) {
- ret = camera_->queueRequest(request.get());
+ ret = queueRequest(request.get());
if (ret < 0) {
qWarning() << "Can't queue request";
goto error_disconnect;
@@ -601,21 +596,20 @@ void MainWindow::stopCapture()
void MainWindow::processHotplug(HotplugEvent *e)
{
Camera *camera = e->camera();
+ QString cameraId = QString::fromStdString(camera->id());
HotplugEvent::PlugEvent event = e->hotplugEvent();
if (event == HotplugEvent::HotPlug) {
- cameraCombo_->addItem(QString::fromStdString(camera->id()));
+ cameraSelectorDialog_->addCamera(cameraId);
} else if (event == HotplugEvent::HotUnplug) {
/* Check if the currently-streaming camera is removed. */
if (camera == camera_.get()) {
toggleCapture(false);
camera_->release();
camera_.reset();
- cameraCombo_->setCurrentIndex(0);
}
- int camIndex = cameraCombo_->findText(QString::fromStdString(camera->id()));
- cameraCombo_->removeItem(camIndex);
+ cameraSelectorDialog_->removeCamera(cameraId);
}
}
@@ -755,7 +749,7 @@ void MainWindow::processViewfinder(FrameBuffer *buffer)
viewfinder_->render(buffer, mappedBuffers_[buffer].get());
}
-void MainWindow::queueRequest(FrameBuffer *buffer)
+void MainWindow::renderComplete(FrameBuffer *buffer)
{
Request *request;
{
@@ -784,6 +778,10 @@ void MainWindow::queueRequest(FrameBuffer *buffer)
qWarning() << "No free buffer available for RAW capture";
}
}
+ queueRequest(request);
+}
- camera_->queueRequest(request);
+int MainWindow::queueRequest(Request *request)
+{
+ return camera_->queueRequest(request);
}
diff --git a/src/qcam/main_window.h b/src/apps/qcam/main_window.h
index 3fbe872c..4cead734 100644
--- a/src/qcam/main_window.h
+++ b/src/apps/qcam/main_window.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * main_window.h - qcam - Main application window
+ * qcam - Main application window
*/
#pragma once
@@ -10,28 +10,30 @@
#include <memory>
#include <vector>
+#include <libcamera/camera.h>
+#include <libcamera/camera_manager.h>
+#include <libcamera/controls.h>
+#include <libcamera/framebuffer.h>
+#include <libcamera/framebuffer_allocator.h>
+#include <libcamera/request.h>
+#include <libcamera/stream.h>
+
#include <QElapsedTimer>
#include <QIcon>
#include <QMainWindow>
#include <QMutex>
#include <QObject>
+#include <QPushButton>
#include <QQueue>
#include <QTimer>
-#include <libcamera/camera.h>
-#include <libcamera/camera_manager.h>
-#include <libcamera/controls.h>
-#include <libcamera/framebuffer.h>
-#include <libcamera/framebuffer_allocator.h>
-#include <libcamera/request.h>
-#include <libcamera/stream.h>
+#include "../common/stream_options.h"
-#include "../cam/stream_options.h"
#include "viewfinder.h"
class QAction;
-class QComboBox;
+class CameraSelectorDialog;
class Image;
class HotplugEvent;
@@ -58,7 +60,7 @@ private Q_SLOTS:
void quit();
void updateTitle();
- void switchCamera(int index);
+ void switchCamera();
void toggleCapture(bool start);
void saveImageAs();
@@ -66,7 +68,7 @@ private Q_SLOTS:
void processRaw(libcamera::FrameBuffer *buffer,
const libcamera::ControlList &metadata);
- void queueRequest(libcamera::FrameBuffer *buffer);
+ void renderComplete(libcamera::FrameBuffer *buffer);
private:
int createToolbars();
@@ -80,6 +82,7 @@ private:
void addCamera(std::shared_ptr<libcamera::Camera> camera);
void removeCamera(std::shared_ptr<libcamera::Camera> camera);
+ int queueRequest(libcamera::Request *request);
void requestComplete(libcamera::Request *request);
void processCapture();
void processHotplug(HotplugEvent *e);
@@ -88,7 +91,7 @@ private:
/* UI elements */
QToolBar *toolbar_;
QAction *startStopAction_;
- QComboBox *cameraCombo_;
+ QPushButton *cameraSelectButton_;
QAction *saveRaw_;
ViewFinder *viewfinder_;
@@ -98,6 +101,8 @@ private:
QString title_;
QTimer titleTimer_;
+ CameraSelectorDialog *cameraSelectorDialog_;
+
/* Options */
const OptionsParser::Options &options_;
diff --git a/src/qcam/meson.build b/src/apps/qcam/meson.build
index c46f4631..6cf4c171 100644
--- a/src/qcam/meson.build
+++ b/src/apps/qcam/meson.build
@@ -15,9 +15,7 @@ endif
qcam_enabled = true
qcam_sources = files([
- '../cam/image.cpp',
- '../cam/options.cpp',
- '../cam/stream_options.cpp',
+ 'cam_select_dialog.cpp',
'format_converter.cpp',
'main.cpp',
'main_window.cpp',
@@ -26,6 +24,7 @@ qcam_sources = files([
])
qcam_moc_headers = files([
+ 'cam_select_dialog.h',
'main_window.h',
'viewfinder_qt.h',
])
@@ -34,22 +33,7 @@ qcam_resources = files([
'assets/feathericons/feathericons.qrc',
])
-qcam_deps = [
- libatomic,
- libcamera_public,
- qt5_dep,
-]
-
-qt5_cpp_args = ['-DQT_NO_KEYWORDS']
-
-tiff_dep = dependency('libtiff-4', required : false)
-if tiff_dep.found()
- qt5_cpp_args += ['-DHAVE_TIFF']
- qcam_deps += [tiff_dep]
- qcam_sources += files([
- 'dng_writer.cpp',
- ])
-endif
+qt5_cpp_args = [apps_cpp_args, '-DQT_NO_KEYWORDS']
if cxx.has_header_symbol('QOpenGLWidget', 'QOpenGLWidget',
dependencies : qt5_dep, args : '-fPIC')
@@ -73,11 +57,18 @@ if ((cc.get_id() == 'gcc' and cc.version().version_compare('>=9.0') and
qt5_cpp_args += ['-Wno-deprecated-copy']
endif
-resources = qt5.preprocess(moc_headers: qcam_moc_headers,
+resources = qt5.preprocess(moc_headers : qcam_moc_headers,
qresources : qcam_resources,
- dependencies: qt5_dep)
+ dependencies : qt5_dep)
qcam = executable('qcam', qcam_sources, resources,
install : true,
- dependencies : qcam_deps,
+ install_tag : 'bin',
+ link_with : apps_lib,
+ dependencies : [
+ libatomic,
+ libcamera_public,
+ libtiff,
+ qt5_dep,
+ ],
cpp_args : qt5_cpp_args)
diff --git a/src/qcam/message_handler.cpp b/src/apps/qcam/message_handler.cpp
index 261623e1..c89714a9 100644
--- a/src/qcam/message_handler.cpp
+++ b/src/apps/qcam/message_handler.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Laurent Pinchart <laurent.pinchart@ideasonboard.com>
*
- * message_handler.cpp - qcam - Log message handling
+ * qcam - Log message handling
*/
#include "message_handler.h"
diff --git a/src/qcam/message_handler.h b/src/apps/qcam/message_handler.h
index 56294d37..92ef74d1 100644
--- a/src/qcam/message_handler.h
+++ b/src/apps/qcam/message_handler.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Laurent Pinchart <laurent.pinchart@ideasonboard.com>
*
- * message_handler.cpp - qcam - Log message handling
+ * Log message handling
*/
#pragma once
diff --git a/src/qcam/viewfinder.h b/src/apps/qcam/viewfinder.h
index 260074ae..914f88ec 100644
--- a/src/qcam/viewfinder.h
+++ b/src/apps/qcam/viewfinder.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * viewfinder.h - qcam - Viewfinder base class
+ * qcam - Viewfinder base class
*/
#pragma once
@@ -11,6 +11,7 @@
#include <QList>
#include <QSize>
+#include <libcamera/color_space.h>
#include <libcamera/formats.h>
#include <libcamera/framebuffer.h>
@@ -24,6 +25,7 @@ public:
virtual const QList<libcamera::PixelFormat> &nativeFormats() const = 0;
virtual int setFormat(const libcamera::PixelFormat &format, const QSize &size,
+ const libcamera::ColorSpace &colorSpace,
unsigned int stride) = 0;
virtual void render(libcamera::FrameBuffer *buffer, Image *image) = 0;
virtual void stop() = 0;
diff --git a/src/qcam/viewfinder_gl.cpp b/src/apps/qcam/viewfinder_gl.cpp
index 3ae8b03a..9d2a6960 100644
--- a/src/qcam/viewfinder_gl.cpp
+++ b/src/apps/qcam/viewfinder_gl.cpp
@@ -2,18 +2,21 @@
/*
* Copyright (C) 2020, Linaro
*
- * viewfinderGL.cpp - OpenGL Viewfinder for rendering by OpenGL shader
+ * OpenGL Viewfinder for rendering by OpenGL shader
*/
#include "viewfinder_gl.h"
+#include <array>
+
#include <QByteArray>
#include <QFile>
#include <QImage>
+#include <QStringList>
#include <libcamera/formats.h>
-#include "../cam/image.h"
+#include "../common/image.h"
static const QList<libcamera::PixelFormat> supportedFormats{
/* YUV - packed (single plane) */
@@ -56,7 +59,8 @@ static const QList<libcamera::PixelFormat> supportedFormats{
};
ViewFinderGL::ViewFinderGL(QWidget *parent)
- : QOpenGLWidget(parent), buffer_(nullptr), image_(nullptr),
+ : QOpenGLWidget(parent), buffer_(nullptr),
+ colorSpace_(libcamera::ColorSpace::Raw), image_(nullptr),
vertexBuffer_(QOpenGLBuffer::VertexBuffer)
{
}
@@ -71,10 +75,11 @@ const QList<libcamera::PixelFormat> &ViewFinderGL::nativeFormats() const
return supportedFormats;
}
-int ViewFinderGL::setFormat(const libcamera::PixelFormat &format,
- const QSize &size, unsigned int stride)
+int ViewFinderGL::setFormat(const libcamera::PixelFormat &format, const QSize &size,
+ const libcamera::ColorSpace &colorSpace,
+ unsigned int stride)
{
- if (format != format_) {
+ if (format != format_ || colorSpace != colorSpace_) {
/*
* If the fragment already exists, remove it and create a new
* one for the new format.
@@ -88,7 +93,10 @@ int ViewFinderGL::setFormat(const libcamera::PixelFormat &format,
if (!selectFormat(format))
return -1;
+ selectColorSpace(colorSpace);
+
format_ = format;
+ colorSpace_ = colorSpace;
}
size_ = size;
@@ -317,6 +325,74 @@ bool ViewFinderGL::selectFormat(const libcamera::PixelFormat &format)
return ret;
}
+void ViewFinderGL::selectColorSpace(const libcamera::ColorSpace &colorSpace)
+{
+ std::array<double, 9> yuv2rgb;
+
+ /* OpenGL stores arrays in column-major order. */
+ switch (colorSpace.ycbcrEncoding) {
+ case libcamera::ColorSpace::YcbcrEncoding::None:
+ default:
+ yuv2rgb = {
+ 1.0000, 0.0000, 0.0000,
+ 0.0000, 1.0000, 0.0000,
+ 0.0000, 0.0000, 1.0000,
+ };
+ break;
+
+ case libcamera::ColorSpace::YcbcrEncoding::Rec601:
+ yuv2rgb = {
+ 1.0000, 1.0000, 1.0000,
+ 0.0000, -0.3441, 1.7720,
+ 1.4020, -0.7141, 0.0000,
+ };
+ break;
+
+ case libcamera::ColorSpace::YcbcrEncoding::Rec709:
+ yuv2rgb = {
+ 1.0000, 1.0000, 1.0000,
+ 0.0000, -0.1873, 1.8856,
+ 1.5748, -0.4681, 0.0000,
+ };
+ break;
+
+ case libcamera::ColorSpace::YcbcrEncoding::Rec2020:
+ yuv2rgb = {
+ 1.0000, 1.0000, 1.0000,
+ 0.0000, -0.1646, 1.8814,
+ 1.4746, -0.5714, 0.0000,
+ };
+ break;
+ }
+
+ double offset;
+
+ switch (colorSpace.range) {
+ case libcamera::ColorSpace::Range::Full:
+ default:
+ offset = 0.0;
+ break;
+
+ case libcamera::ColorSpace::Range::Limited:
+ offset = 16.0;
+
+ for (unsigned int i = 0; i < 3; ++i)
+ yuv2rgb[i] *= 255.0 / 219.0;
+ for (unsigned int i = 4; i < 9; ++i)
+ yuv2rgb[i] *= 255.0 / 224.0;
+ break;
+ }
+
+ QStringList matrix;
+
+ for (double coeff : yuv2rgb)
+ matrix.append(QString::number(coeff, 'f'));
+
+ fragmentShaderDefines_.append("#define YUV2RGB_MATRIX " + matrix.join(", "));
+ fragmentShaderDefines_.append(QString("#define YUV2RGB_Y_OFFSET %1")
+ .arg(offset, 0, 'f', 1));
+}
+
bool ViewFinderGL::createVertexShader()
{
/* Create Vertex Shader */
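The selectColorSpace() helper added above only emits the conversion matrix and luma offset as fragment shader defines; the shader code that consumes them lies outside this hunk. As a sanity check of the coefficients (an annotation, not part of the patch, assuming the shader computes rgb = M * (yuv - offsets) with M stored column-major), the following stand-alone C++ snippet applies the same Rec.601 matrix with limited-range scaling and confirms that nominal peak white (Y=235, Cb=Cr=128) maps to RGB 255:

    #include <algorithm>
    #include <array>
    #include <cstdio>

    int main()
    {
            /* Rec.601 columns as above, pre-scaled for limited range. */
            std::array<double, 9> m = {
                    255.0 / 219.0,          255.0 / 219.0,            255.0 / 219.0,
                    0.0,                    -0.3441 * 255.0 / 224.0,  1.7720 * 255.0 / 224.0,
                    1.4020 * 255.0 / 224.0, -0.7141 * 255.0 / 224.0,  0.0,
            };

            /* Limited-range peak white, with the Y/Cb/Cr offsets already removed. */
            const double yuv[3] = { 235.0 - 16.0, 0.0, 0.0 };
            double rgb[3] = {};

            /* Column-major multiply, as the GL shader is assumed to do. */
            for (int col = 0; col < 3; col++)
                    for (int row = 0; row < 3; row++)
                            rgb[row] += m[col * 3 + row] * yuv[col];

            for (double &c : rgb)
                    c = std::clamp(c, 0.0, 255.0);

            printf("R=%.1f G=%.1f B=%.1f\n", rgb[0], rgb[1], rgb[2]); /* 255.0 255.0 255.0 */
            return 0;
    }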
diff --git a/src/qcam/viewfinder_gl.h b/src/apps/qcam/viewfinder_gl.h
index 0a9275ba..23744b41 100644
--- a/src/qcam/viewfinder_gl.h
+++ b/src/apps/qcam/viewfinder_gl.h
@@ -2,8 +2,7 @@
/*
* Copyright (C) 2020, Linaro
*
- * viewfinder_GL.h - OpenGL Viewfinder for rendering by OpenGL shader
- *
+ * OpenGL Viewfinder for rendering by OpenGL shader
*/
#pragma once
@@ -39,6 +38,7 @@ public:
const QList<libcamera::PixelFormat> &nativeFormats() const override;
int setFormat(const libcamera::PixelFormat &format, const QSize &size,
+ const libcamera::ColorSpace &colorSpace,
unsigned int stride) override;
void render(libcamera::FrameBuffer *buffer, Image *image) override;
void stop() override;
@@ -56,6 +56,7 @@ protected:
private:
bool selectFormat(const libcamera::PixelFormat &format);
+ void selectColorSpace(const libcamera::ColorSpace &colorSpace);
void configureTexture(QOpenGLTexture &texture);
bool createFragmentShader();
@@ -66,6 +67,7 @@ private:
/* Captured image size, format and buffer */
libcamera::FrameBuffer *buffer_;
libcamera::PixelFormat format_;
+ libcamera::ColorSpace colorSpace_;
QSize size_;
unsigned int stride_;
Image *image_;
diff --git a/src/qcam/viewfinder_qt.cpp b/src/apps/qcam/viewfinder_qt.cpp
index 6844f998..4821c27d 100644
--- a/src/qcam/viewfinder_qt.cpp
+++ b/src/apps/qcam/viewfinder_qt.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * viewfinder_qt.cpp - qcam - QPainter-based ViewFinder
+ * qcam - QPainter-based ViewFinder
*/
#include "viewfinder_qt.h"
@@ -11,6 +11,8 @@
#include <stdint.h>
#include <utility>
+#include <libcamera/formats.h>
+
#include <QImage>
#include <QImageWriter>
#include <QMap>
@@ -18,21 +20,23 @@
#include <QPainter>
#include <QtDebug>
-#include <libcamera/formats.h>
+#include "../common/image.h"
-#include "../cam/image.h"
#include "format_converter.h"
static const QMap<libcamera::PixelFormat, QImage::Format> nativeFormats
{
#if QT_VERSION >= QT_VERSION_CHECK(5, 2, 0)
- { libcamera::formats::ABGR8888, QImage::Format_RGBA8888 },
+ { libcamera::formats::ABGR8888, QImage::Format_RGBX8888 },
+ { libcamera::formats::XBGR8888, QImage::Format_RGBX8888 },
#endif
{ libcamera::formats::ARGB8888, QImage::Format_RGB32 },
+ { libcamera::formats::XRGB8888, QImage::Format_RGB32 },
#if QT_VERSION >= QT_VERSION_CHECK(5, 14, 0)
{ libcamera::formats::RGB888, QImage::Format_BGR888 },
#endif
{ libcamera::formats::BGR888, QImage::Format_RGB888 },
+ { libcamera::formats::RGB565, QImage::Format_RGB16 },
};
ViewFinderQt::ViewFinderQt(QWidget *parent)
@@ -51,8 +55,9 @@ const QList<libcamera::PixelFormat> &ViewFinderQt::nativeFormats() const
return formats;
}
-int ViewFinderQt::setFormat(const libcamera::PixelFormat &format,
- const QSize &size, unsigned int stride)
+int ViewFinderQt::setFormat(const libcamera::PixelFormat &format, const QSize &size,
+ [[maybe_unused]] const libcamera::ColorSpace &colorSpace,
+ unsigned int stride)
{
image_ = QImage();
@@ -67,7 +72,8 @@ int ViewFinderQt::setFormat(const libcamera::PixelFormat &format,
image_ = QImage(size, QImage::Format_RGB32);
- qInfo() << "Using software format conversion from" << format;
+ qInfo() << "Using software format conversion from"
+ << format.toString().c_str();
} else {
qInfo() << "Zero-copy enabled";
}
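For context on the zero-copy branch above: when the negotiated PixelFormat has an entry in the nativeFormats map, ViewFinderQt can hand the mapped frame data straight to a QImage without a conversion pass. A minimal illustration of that wrapping (names and parameters are placeholders, not code from this patch):

    #include <QImage>

    /* Wrap an already-mapped frame without copying pixels. */
    QImage wrapFrame(const uchar *data, int width, int height,
                     unsigned int stride, QImage::Format nativeFormat)
    {
            /* The QImage only references the caller's buffer here. */
            return QImage(data, width, height, stride, nativeFormat);
    }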
diff --git a/src/qcam/viewfinder_qt.h b/src/apps/qcam/viewfinder_qt.h
index 8c621452..4f4b9f11 100644
--- a/src/qcam/viewfinder_qt.h
+++ b/src/apps/qcam/viewfinder_qt.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * viewfinder_qt.h - qcam - QPainter-based ViewFinder
+ * qcam - QPainter-based ViewFinder
*/
#pragma once
@@ -32,6 +32,7 @@ public:
const QList<libcamera::PixelFormat> &nativeFormats() const override;
int setFormat(const libcamera::PixelFormat &format, const QSize &size,
+ const libcamera::ColorSpace &colorSpace,
unsigned int stride) override;
void render(libcamera::FrameBuffer *buffer, Image *image) override;
void stop() override;
diff --git a/src/cam/capture_script.cpp b/src/cam/capture_script.cpp
deleted file mode 100644
index 9f22d5f7..00000000
--- a/src/cam/capture_script.cpp
+++ /dev/null
@@ -1,336 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2022, Ideas on Board Oy
- *
- * capture_script.cpp - Capture session configuration script
- */
-
-#include "capture_script.h"
-
-#include <iostream>
-#include <stdio.h>
-#include <stdlib.h>
-
-using namespace libcamera;
-
-CaptureScript::CaptureScript(std::shared_ptr<Camera> camera,
- const std::string &fileName)
- : camera_(camera), valid_(false)
-{
- FILE *fh = fopen(fileName.c_str(), "r");
- if (!fh) {
- int ret = -errno;
- std::cerr << "Failed to open capture script " << fileName
- << ": " << strerror(-ret) << std::endl;
- return;
- }
-
- /*
- * Map the camera's controls to their name so that they can be
- * easily identified when parsing the script file.
- */
- for (const auto &[control, info] : camera_->controls())
- controls_[control->name()] = control;
-
- int ret = parseScript(fh);
- fclose(fh);
- if (ret)
- return;
-
- valid_ = true;
-}
-
-/* Retrieve the control list associated with a frame number. */
-const ControlList &CaptureScript::frameControls(unsigned int frame)
-{
- static ControlList controls{};
-
- auto it = frameControls_.find(frame);
- if (it == frameControls_.end())
- return controls;
-
- return it->second;
-}
-
-CaptureScript::EventPtr CaptureScript::nextEvent(yaml_event_type_t expectedType)
-{
- EventPtr event(new yaml_event_t);
-
- if (!yaml_parser_parse(&parser_, event.get()))
- return nullptr;
-
- if (expectedType != YAML_NO_EVENT && !checkEvent(event, expectedType))
- return nullptr;
-
- return event;
-}
-
-bool CaptureScript::checkEvent(const EventPtr &event, yaml_event_type_t expectedType) const
-{
- if (event->type != expectedType) {
- std::cerr << "Capture script error on line " << event->start_mark.line
- << " column " << event->start_mark.column << ": "
- << "Expected " << eventTypeName(expectedType)
- << " event, got " << eventTypeName(event->type)
- << std::endl;
- return false;
- }
-
- return true;
-}
-
-std::string CaptureScript::eventScalarValue(const EventPtr &event)
-{
- return std::string(reinterpret_cast<char *>(event->data.scalar.value),
- event->data.scalar.length);
-}
-
-std::string CaptureScript::eventTypeName(yaml_event_type_t type)
-{
- static const std::map<yaml_event_type_t, std::string> typeNames = {
- { YAML_STREAM_START_EVENT, "stream-start" },
- { YAML_STREAM_END_EVENT, "stream-end" },
- { YAML_DOCUMENT_START_EVENT, "document-start" },
- { YAML_DOCUMENT_END_EVENT, "document-end" },
- { YAML_ALIAS_EVENT, "alias" },
- { YAML_SCALAR_EVENT, "scalar" },
- { YAML_SEQUENCE_START_EVENT, "sequence-start" },
- { YAML_SEQUENCE_END_EVENT, "sequence-end" },
- { YAML_MAPPING_START_EVENT, "mapping-start" },
- { YAML_MAPPING_END_EVENT, "mapping-end" },
- };
-
- auto it = typeNames.find(type);
- if (it == typeNames.end())
- return "[type " + std::to_string(type) + "]";
-
- return it->second;
-}
-
-int CaptureScript::parseScript(FILE *script)
-{
- int ret = yaml_parser_initialize(&parser_);
- if (!ret) {
- std::cerr << "Failed to initialize yaml parser" << std::endl;
- return ret;
- }
-
- /* Delete the parser upon function exit. */
- struct ParserDeleter {
- ParserDeleter(yaml_parser_t *parser) : parser_(parser) { }
- ~ParserDeleter() { yaml_parser_delete(parser_); }
- yaml_parser_t *parser_;
- } deleter(&parser_);
-
- yaml_parser_set_input_file(&parser_, script);
-
- EventPtr event = nextEvent(YAML_STREAM_START_EVENT);
- if (!event)
- return -EINVAL;
-
- event = nextEvent(YAML_DOCUMENT_START_EVENT);
- if (!event)
- return -EINVAL;
-
- event = nextEvent(YAML_MAPPING_START_EVENT);
- if (!event)
- return -EINVAL;
-
- while (1) {
- event = nextEvent();
- if (!event)
- return -EINVAL;
-
- if (event->type == YAML_MAPPING_END_EVENT)
- return 0;
-
- if (!checkEvent(event, YAML_SCALAR_EVENT))
- return -EINVAL;
-
- std::string section = eventScalarValue(event);
-
- if (section == "frames") {
- parseFrames();
- } else {
- std::cerr << "Unsupported section '" << section << "'"
- << std::endl;
- return -EINVAL;
- }
- }
-}
-
-int CaptureScript::parseFrames()
-{
- EventPtr event = nextEvent(YAML_SEQUENCE_START_EVENT);
- if (!event)
- return -EINVAL;
-
- while (1) {
- event = nextEvent();
- if (!event)
- return -EINVAL;
-
- if (event->type == YAML_SEQUENCE_END_EVENT)
- return 0;
-
- int ret = parseFrame(std::move(event));
- if (ret)
- return ret;
- }
-}
-
-int CaptureScript::parseFrame(EventPtr event)
-{
- if (!checkEvent(event, YAML_MAPPING_START_EVENT))
- return -EINVAL;
-
- std::string key = parseScalar();
- if (key.empty())
- return -EINVAL;
-
- unsigned int frameId = atoi(key.c_str());
-
- event = nextEvent(YAML_MAPPING_START_EVENT);
- if (!event)
- return -EINVAL;
-
- ControlList controls{};
-
- while (1) {
- event = nextEvent();
- if (!event)
- return -EINVAL;
-
- if (event->type == YAML_MAPPING_END_EVENT)
- break;
-
- int ret = parseControl(std::move(event), controls);
- if (ret)
- return ret;
- }
-
- frameControls_[frameId] = std::move(controls);
-
- event = nextEvent(YAML_MAPPING_END_EVENT);
- if (!event)
- return -EINVAL;
-
- return 0;
-}
-
-int CaptureScript::parseControl(EventPtr event, ControlList &controls)
-{
- /* We expect a value after a key. */
- std::string name = eventScalarValue(event);
- if (name.empty())
- return -EINVAL;
-
- /* If the camera does not support the control just ignore it. */
- auto it = controls_.find(name);
- if (it == controls_.end()) {
- std::cerr << "Unsupported control '" << name << "'" << std::endl;
- return -EINVAL;
- }
-
- std::string value = parseScalar();
- if (value.empty())
- return -EINVAL;
-
- const ControlId *controlId = it->second;
- ControlValue val = unpackControl(controlId, value);
- controls.set(controlId->id(), val);
-
- return 0;
-}
-
-std::string CaptureScript::parseScalar()
-{
- EventPtr event = nextEvent(YAML_SCALAR_EVENT);
- if (!event)
- return "";
-
- return eventScalarValue(event);
-}
-
-void CaptureScript::unpackFailure(const ControlId *id, const std::string &repr)
-{
- static const std::map<unsigned int, const char *> typeNames = {
- { ControlTypeNone, "none" },
- { ControlTypeBool, "bool" },
- { ControlTypeByte, "byte" },
- { ControlTypeInteger32, "int32" },
- { ControlTypeInteger64, "int64" },
- { ControlTypeFloat, "float" },
- { ControlTypeString, "string" },
- { ControlTypeRectangle, "Rectangle" },
- { ControlTypeSize, "Size" },
- };
-
- const char *typeName;
- auto it = typeNames.find(id->type());
- if (it != typeNames.end())
- typeName = it->second;
- else
- typeName = "unknown";
-
- std::cerr << "Unsupported control '" << repr << "' for "
- << typeName << " control " << id->name() << std::endl;
-}
-
-ControlValue CaptureScript::unpackControl(const ControlId *id,
- const std::string &repr)
-{
- ControlValue value{};
-
- switch (id->type()) {
- case ControlTypeNone:
- break;
- case ControlTypeBool: {
- bool val;
-
- if (repr == "true") {
- val = true;
- } else if (repr == "false") {
- val = false;
- } else {
- unpackFailure(id, repr);
- return value;
- }
-
- value.set<bool>(val);
- break;
- }
- case ControlTypeByte: {
- uint8_t val = strtol(repr.c_str(), NULL, 10);
- value.set<uint8_t>(val);
- break;
- }
- case ControlTypeInteger32: {
- int32_t val = strtol(repr.c_str(), NULL, 10);
- value.set<int32_t>(val);
- break;
- }
- case ControlTypeInteger64: {
- int64_t val = strtoll(repr.c_str(), NULL, 10);
- value.set<int64_t>(val);
- break;
- }
- case ControlTypeFloat: {
- float val = strtof(repr.c_str(), NULL);
- value.set<float>(val);
- break;
- }
- case ControlTypeString: {
- value.set<std::string>(repr);
- break;
- }
- case ControlTypeRectangle:
- /* \todo Parse rectangles. */
- break;
- case ControlTypeSize:
- /* \todo Parse Sizes. */
- break;
- }
-
- return value;
-}
diff --git a/src/cam/sdl_texture_mjpg.cpp b/src/cam/sdl_texture_mjpg.cpp
deleted file mode 100644
index 69e99ad3..00000000
--- a/src/cam/sdl_texture_mjpg.cpp
+++ /dev/null
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2022, Ideas on Board Oy
- *
- * sdl_texture_mjpg.cpp - SDL Texture MJPG
- */
-
-#include "sdl_texture_mjpg.h"
-
-#include <SDL2/SDL_image.h>
-
-using namespace libcamera;
-
-SDLTextureMJPG::SDLTextureMJPG(const SDL_Rect &rect)
- : SDLTexture(rect, SDL_PIXELFORMAT_RGB24, 0)
-{
-}
-
-void SDLTextureMJPG::update(const Span<uint8_t> &data)
-{
- SDL_RWops *bufferStream = SDL_RWFromMem(data.data(), data.size());
- SDL_Surface *frame = IMG_Load_RW(bufferStream, 0);
- SDL_UpdateTexture(ptr_, nullptr, frame->pixels, frame->pitch);
- SDL_FreeSurface(frame);
-}
diff --git a/src/cam/sdl_texture_yuyv.cpp b/src/cam/sdl_texture_yuyv.cpp
deleted file mode 100644
index cc161b2c..00000000
--- a/src/cam/sdl_texture_yuyv.cpp
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2022, Ideas on Board Oy
- *
- * sdl_texture_yuyv.cpp - SDL Texture YUYV
- */
-
-#include "sdl_texture_yuyv.h"
-
-using namespace libcamera;
-
-SDLTextureYUYV::SDLTextureYUYV(const SDL_Rect &rect)
- : SDLTexture(rect, SDL_PIXELFORMAT_YUY2, 4 * ((rect.w + 1) / 2))
-{
-}
-
-void SDLTextureYUYV::update(const Span<uint8_t> &data)
-{
- SDL_UpdateTexture(ptr_, &rect_, data.data(), pitch_);
-}
diff --git a/src/cam/sdl_texture_yuyv.h b/src/cam/sdl_texture_yuyv.h
deleted file mode 100644
index 9f7c72f0..00000000
--- a/src/cam/sdl_texture_yuyv.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2022, Ideas on Board Oy
- *
- * sdl_texture_yuyv.h - SDL Texture YUYV
- */
-
-#pragma once
-
-#include "sdl_texture.h"
-
-class SDLTextureYUYV : public SDLTexture
-{
-public:
- SDLTextureYUYV(const SDL_Rect &rect);
- void update(const libcamera::Span<uint8_t> &data) override;
-};
diff --git a/src/gstreamer/gstlibcamera-utils.cpp b/src/gstreamer/gstlibcamera-utils.cpp
index 3f242286..ec4da435 100644
--- a/src/gstreamer/gstlibcamera-utils.cpp
+++ b/src/gstreamer/gstlibcamera-utils.cpp
@@ -3,11 +3,12 @@
* Copyright (C) 2020, Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
*
- * gstlibcamera-utils.c - GStreamer libcamera Utility Function
+ * GStreamer libcamera Utility Function
*/
#include "gstlibcamera-utils.h"
+#include <libcamera/control_ids.h>
#include <libcamera/formats.h>
using namespace libcamera;
@@ -19,9 +20,47 @@ static struct {
/* Compressed */
{ GST_VIDEO_FORMAT_ENCODED, formats::MJPEG },
- /* RGB */
+ /* Bayer formats, gstreamer only supports 8-bit */
+ { GST_VIDEO_FORMAT_ENCODED, formats::SBGGR8 },
+ { GST_VIDEO_FORMAT_ENCODED, formats::SGBRG8 },
+ { GST_VIDEO_FORMAT_ENCODED, formats::SGRBG8 },
+ { GST_VIDEO_FORMAT_ENCODED, formats::SRGGB8 },
+ { GST_VIDEO_FORMAT_ENCODED, formats::SBGGR10 },
+ { GST_VIDEO_FORMAT_ENCODED, formats::SGBRG10 },
+ { GST_VIDEO_FORMAT_ENCODED, formats::SGRBG10 },
+ { GST_VIDEO_FORMAT_ENCODED, formats::SRGGB10 },
+ { GST_VIDEO_FORMAT_ENCODED, formats::SBGGR12 },
+ { GST_VIDEO_FORMAT_ENCODED, formats::SGBRG12 },
+ { GST_VIDEO_FORMAT_ENCODED, formats::SGRBG12 },
+ { GST_VIDEO_FORMAT_ENCODED, formats::SRGGB12 },
+ { GST_VIDEO_FORMAT_ENCODED, formats::SBGGR14 },
+ { GST_VIDEO_FORMAT_ENCODED, formats::SGBRG14 },
+ { GST_VIDEO_FORMAT_ENCODED, formats::SGRBG14 },
+ { GST_VIDEO_FORMAT_ENCODED, formats::SRGGB14 },
+ { GST_VIDEO_FORMAT_ENCODED, formats::SBGGR16 },
+ { GST_VIDEO_FORMAT_ENCODED, formats::SGBRG16 },
+ { GST_VIDEO_FORMAT_ENCODED, formats::SGRBG16 },
+ { GST_VIDEO_FORMAT_ENCODED, formats::SRGGB16 },
+
+ /* Monochrome */
+ { GST_VIDEO_FORMAT_GRAY8, formats::R8 },
+ { GST_VIDEO_FORMAT_GRAY16_LE, formats::R16 },
+
+ /* RGB16 */
+ { GST_VIDEO_FORMAT_RGB16, formats::RGB565 },
+
+ /* RGB24 */
{ GST_VIDEO_FORMAT_RGB, formats::BGR888 },
{ GST_VIDEO_FORMAT_BGR, formats::RGB888 },
+
+ /* RGB32 */
+ { GST_VIDEO_FORMAT_BGRx, formats::XRGB8888 },
+ { GST_VIDEO_FORMAT_RGBx, formats::XBGR8888 },
+ { GST_VIDEO_FORMAT_xBGR, formats::RGBX8888 },
+ { GST_VIDEO_FORMAT_xRGB, formats::BGRX8888 },
+ { GST_VIDEO_FORMAT_BGRA, formats::ARGB8888 },
+ { GST_VIDEO_FORMAT_RGBA, formats::ABGR8888 },
+ { GST_VIDEO_FORMAT_ABGR, formats::RGBA8888 },
{ GST_VIDEO_FORMAT_ARGB, formats::BGRA8888 },
/* YUV Semiplanar */
@@ -45,6 +84,154 @@ static struct {
/* \todo NV42 is used in libcamera but is not mapped in GStreamer yet. */
};
+static GstVideoColorimetry
+colorimetry_from_colorspace(const ColorSpace &colorSpace)
+{
+ GstVideoColorimetry colorimetry;
+
+ switch (colorSpace.primaries) {
+ case ColorSpace::Primaries::Raw:
+ colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_UNKNOWN;
+ break;
+ case ColorSpace::Primaries::Smpte170m:
+ colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_SMPTE170M;
+ break;
+ case ColorSpace::Primaries::Rec709:
+ colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_BT709;
+ break;
+ case ColorSpace::Primaries::Rec2020:
+ colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_BT2020;
+ break;
+ }
+
+ switch (colorSpace.transferFunction) {
+ case ColorSpace::TransferFunction::Linear:
+ colorimetry.transfer = GST_VIDEO_TRANSFER_GAMMA10;
+ break;
+ case ColorSpace::TransferFunction::Srgb:
+ colorimetry.transfer = GST_VIDEO_TRANSFER_SRGB;
+ break;
+ case ColorSpace::TransferFunction::Rec709:
+ colorimetry.transfer = GST_VIDEO_TRANSFER_BT709;
+ break;
+ }
+
+ switch (colorSpace.ycbcrEncoding) {
+ case ColorSpace::YcbcrEncoding::None:
+ colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_RGB;
+ break;
+ case ColorSpace::YcbcrEncoding::Rec601:
+ colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_BT601;
+ break;
+ case ColorSpace::YcbcrEncoding::Rec709:
+ colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_BT709;
+ break;
+ case ColorSpace::YcbcrEncoding::Rec2020:
+ colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_BT2020;
+ break;
+ }
+
+ switch (colorSpace.range) {
+ case ColorSpace::Range::Full:
+ colorimetry.range = GST_VIDEO_COLOR_RANGE_0_255;
+ break;
+ case ColorSpace::Range::Limited:
+ colorimetry.range = GST_VIDEO_COLOR_RANGE_16_235;
+ break;
+ }
+
+ return colorimetry;
+}
+
+static std::optional<ColorSpace>
+colorspace_from_colorimetry(const GstVideoColorimetry &colorimetry)
+{
+ std::optional<ColorSpace> colorspace = ColorSpace::Raw;
+
+ switch (colorimetry.primaries) {
+ case GST_VIDEO_COLOR_PRIMARIES_UNKNOWN:
+ /* Unknown primaries map to raw colorspace in gstreamer */
+ return ColorSpace::Raw;
+ case GST_VIDEO_COLOR_PRIMARIES_SMPTE170M:
+ colorspace->primaries = ColorSpace::Primaries::Smpte170m;
+ break;
+ case GST_VIDEO_COLOR_PRIMARIES_BT709:
+ colorspace->primaries = ColorSpace::Primaries::Rec709;
+ break;
+ case GST_VIDEO_COLOR_PRIMARIES_BT2020:
+ colorspace->primaries = ColorSpace::Primaries::Rec2020;
+ break;
+ default:
+ GST_WARNING("Colorimetry primaries %d not mapped in gstlibcamera",
+ colorimetry.primaries);
+ return std::nullopt;
+ }
+
+ switch (colorimetry.transfer) {
+ /* Transfer function mappings inspired from v4l2src plugin */
+ case GST_VIDEO_TRANSFER_GAMMA18:
+ case GST_VIDEO_TRANSFER_GAMMA20:
+ case GST_VIDEO_TRANSFER_GAMMA22:
+ case GST_VIDEO_TRANSFER_GAMMA28:
+ GST_WARNING("GAMMA 18, 20, 22, 28 transfer functions not supported");
+ [[fallthrough]];
+ case GST_VIDEO_TRANSFER_GAMMA10:
+ colorspace->transferFunction = ColorSpace::TransferFunction::Linear;
+ break;
+ case GST_VIDEO_TRANSFER_SRGB:
+ colorspace->transferFunction = ColorSpace::TransferFunction::Srgb;
+ break;
+#if GST_CHECK_VERSION(1, 18, 0)
+ case GST_VIDEO_TRANSFER_BT601:
+ case GST_VIDEO_TRANSFER_BT2020_10:
+#endif
+ case GST_VIDEO_TRANSFER_BT2020_12:
+ case GST_VIDEO_TRANSFER_BT709:
+ colorspace->transferFunction = ColorSpace::TransferFunction::Rec709;
+ break;
+ default:
+ GST_WARNING("Colorimetry transfer function %d not mapped in gstlibcamera",
+ colorimetry.transfer);
+ return std::nullopt;
+ }
+
+ switch (colorimetry.matrix) {
+ case GST_VIDEO_COLOR_MATRIX_RGB:
+ colorspace->ycbcrEncoding = ColorSpace::YcbcrEncoding::None;
+ break;
+ /* FCC is about the same as BT601 with less digit */
+ case GST_VIDEO_COLOR_MATRIX_FCC:
+ case GST_VIDEO_COLOR_MATRIX_BT601:
+ colorspace->ycbcrEncoding = ColorSpace::YcbcrEncoding::Rec601;
+ break;
+ case GST_VIDEO_COLOR_MATRIX_BT709:
+ colorspace->ycbcrEncoding = ColorSpace::YcbcrEncoding::Rec709;
+ break;
+ case GST_VIDEO_COLOR_MATRIX_BT2020:
+ colorspace->ycbcrEncoding = ColorSpace::YcbcrEncoding::Rec2020;
+ break;
+ default:
+ GST_WARNING("Colorimetry matrix %d not mapped in gstlibcamera",
+ colorimetry.matrix);
+ return std::nullopt;
+ }
+
+ switch (colorimetry.range) {
+ case GST_VIDEO_COLOR_RANGE_0_255:
+ colorspace->range = ColorSpace::Range::Full;
+ break;
+ case GST_VIDEO_COLOR_RANGE_16_235:
+ colorspace->range = ColorSpace::Range::Limited;
+ break;
+ default:
+ GST_WARNING("Colorimetry range %d not mapped in gstlibcamera",
+ colorimetry.range);
+ return std::nullopt;
+ }
+
+ return colorspace;
+}
+
static GstVideoFormat
pixel_format_to_gst_format(const PixelFormat &format)
{
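The two mapping helpers above are file-static, but their intent can be illustrated with a round trip (an annotation written under that assumption, not part of the patch): a fully specified Rec.709 colour space should serialize to GStreamer's canonical "bt709" colorimetry and parse back to the same ColorSpace.

    /* Somewhere with access to the static helpers, e.g. within this file. */
    libcamera::ColorSpace rec709(
            libcamera::ColorSpace::Primaries::Rec709,
            libcamera::ColorSpace::TransferFunction::Rec709,
            libcamera::ColorSpace::YcbcrEncoding::Rec709,
            libcamera::ColorSpace::Range::Limited);

    GstVideoColorimetry colorimetry = colorimetry_from_colorspace(rec709);
    g_autofree gchar *str = gst_video_colorimetry_to_string(&colorimetry);
    /* str is expected to be "bt709"; feeding it back through
     * gst_video_colorimetry_from_string() and colorspace_from_colorimetry()
     * should yield the original ColorSpace again. */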
@@ -67,6 +254,54 @@ gst_format_to_pixel_format(GstVideoFormat gst_format)
return PixelFormat{};
}
+static const gchar *
+bayer_format_to_string(int format)
+{
+ switch (format) {
+ case formats::SBGGR8:
+ return "bggr";
+ case formats::SGBRG8:
+ return "gbrg";
+ case formats::SGRBG8:
+ return "grbg";
+ case formats::SRGGB8:
+ return "rggb";
+ case formats::SBGGR10:
+ return "bggr10le";
+ case formats::SGBRG10:
+ return "gbrg10le";
+ case formats::SGRBG10:
+ return "grbg10le";
+ case formats::SRGGB10:
+ return "rggb10le";
+ case formats::SBGGR12:
+ return "bggr12le";
+ case formats::SGBRG12:
+ return "gbrg12le";
+ case formats::SGRBG12:
+ return "grbg12le";
+ case formats::SRGGB12:
+ return "rggb12le";
+ case formats::SBGGR14:
+ return "bggr14le";
+ case formats::SGBRG14:
+ return "gbrg14le";
+ case formats::SGRBG14:
+ return "grbg14le";
+ case formats::SRGGB14:
+ return "rggb14le";
+ case formats::SBGGR16:
+ return "bggr16le";
+ case formats::SGBRG16:
+ return "gbrg16le";
+ case formats::SGRBG16:
+ return "grbg16le";
+ case formats::SRGGB16:
+ return "rggb16le";
+ }
+ return NULL;
+}
+
static GstStructure *
bare_structure_from_format(const PixelFormat &format)
{
@@ -82,6 +317,14 @@ bare_structure_from_format(const PixelFormat &format)
switch (format) {
case formats::MJPEG:
return gst_structure_new_empty("image/jpeg");
+
+ case formats::SBGGR8:
+ case formats::SGBRG8:
+ case formats::SGRBG8:
+ case formats::SRGGB8:
+ return gst_structure_new("video/x-bayer", "format", G_TYPE_STRING,
+ bayer_format_to_string(format), nullptr);
+
default:
return nullptr;
}
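Note that bare_structure_from_format() only emits a video/x-bayer structure for the 8-bit Bayer formats, matching the "gstreamer only supports 8-bit" comment in the format table, even though bayer_format_to_string() already covers the deeper variants. For illustration (hypothetical resolution), an SRGGB8 stream would end up advertised as:

    video/x-bayer, format=(string)rggb, width=(int)1280, height=(int)720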
@@ -139,6 +382,18 @@ gst_libcamera_stream_configuration_to_caps(const StreamConfiguration &stream_cfg
"width", G_TYPE_INT, stream_cfg.size.width,
"height", G_TYPE_INT, stream_cfg.size.height,
nullptr);
+
+ if (stream_cfg.colorSpace) {
+ GstVideoColorimetry colorimetry = colorimetry_from_colorspace(stream_cfg.colorSpace.value());
+ g_autofree gchar *colorimetry_str = gst_video_colorimetry_to_string(&colorimetry);
+
+ if (colorimetry_str)
+ gst_structure_set(s, "colorimetry", G_TYPE_STRING, colorimetry_str, nullptr);
+ else
+ g_error("Got invalid colorimetry from ColorSpace: %s",
+ ColorSpace::toString(stream_cfg.colorSpace).c_str());
+ }
+
gst_caps_append_structure(caps, s);
return caps;
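When the stream configuration carries a colour space, the caps built here also gain a matching colorimetry field. Assuming a 1080p NV12 stream with a full Rec.709 colour space, the resulting caps would read roughly:

    video/x-raw, format=(string)NV12, width=(int)1920, height=(int)1080, colorimetry=(string)bt709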
@@ -222,18 +477,110 @@ gst_libcamera_configure_stream_from_caps(StreamConfiguration &stream_cfg,
gst_structure_get_int(s, "height", &height);
stream_cfg.size.width = width;
stream_cfg.size.height = height;
+
+ /* Configure colorimetry */
+ if (gst_structure_has_field(s, "colorimetry")) {
+ const gchar *colorimetry_str = gst_structure_get_string(s, "colorimetry");
+ GstVideoColorimetry colorimetry;
+
+ if (!gst_video_colorimetry_from_string(&colorimetry, colorimetry_str))
+ g_critical("Invalid colorimetry %s", colorimetry_str);
+
+ stream_cfg.colorSpace = colorspace_from_colorimetry(colorimetry);
+ }
}
-void
-gst_libcamera_resume_task(GstTask *task)
+void gst_libcamera_get_framerate_from_caps(GstCaps *caps,
+ GstStructure *element_caps)
+{
+ GstStructure *s = gst_caps_get_structure(caps, 0);
+ /*
+ * Default to 30 fps. If the "framerate" fraction is invalid below,
+ * libcamerasrc will set 30fps as the framerate.
+ */
+ gint fps_n = 30, fps_d = 1;
+
+ if (gst_structure_has_field_typed(s, "framerate", GST_TYPE_FRACTION)) {
+ if (!gst_structure_get_fraction(s, "framerate", &fps_n, &fps_d))
+ GST_WARNING("Invalid framerate in the caps");
+ }
+
+ gst_structure_set(element_caps, "framerate", GST_TYPE_FRACTION,
+ fps_n, fps_d, nullptr);
+}
+
+void gst_libcamera_clamp_and_set_frameduration(ControlList &initCtrls,
+ const ControlInfoMap &cam_ctrls,
+ GstStructure *element_caps)
+{
+ gint fps_caps_n, fps_caps_d;
+
+ if (!gst_structure_has_field_typed(element_caps, "framerate", GST_TYPE_FRACTION))
+ return;
+
+ auto iterFrameDuration = cam_ctrls.find(&controls::FrameDurationLimits);
+ if (iterFrameDuration == cam_ctrls.end()) {
+ GST_WARNING("FrameDurationLimits not found in camera controls.");
+ return;
+ }
+
+ const GValue *framerate = gst_structure_get_value(element_caps, "framerate");
+
+ fps_caps_n = gst_value_get_fraction_numerator(framerate);
+ fps_caps_d = gst_value_get_fraction_denominator(framerate);
+
+ int64_t target_duration = (fps_caps_d * 1000000.0) / fps_caps_n;
+ int64_t min_frame_duration = iterFrameDuration->second.min().get<int64_t>();
+ int64_t max_frame_duration = iterFrameDuration->second.max().get<int64_t>();
+
+ int64_t frame_duration = std::clamp(target_duration,
+ min_frame_duration,
+ max_frame_duration);
+
+ if (frame_duration != target_duration) {
+ gint framerate_clamped = 1000000 / frame_duration;
+
+ /*
+ * Update the clamped framerate which then will be exposed in
+ * downstream caps.
+ */
+ gst_structure_set(element_caps, "framerate", GST_TYPE_FRACTION,
+ framerate_clamped, 1, nullptr);
+ }
+
+ initCtrls.set(controls::FrameDurationLimits,
+ { frame_duration, frame_duration });
+}
+
+void gst_libcamera_framerate_to_caps(GstCaps *caps, const GstStructure *element_caps)
+{
+ const GValue *framerate = gst_structure_get_value(element_caps, "framerate");
+ if (!GST_VALUE_HOLDS_FRACTION(framerate))
+ return;
+
+ GstStructure *s = gst_caps_get_structure(caps, 0);
+ gint fps_caps_n, fps_caps_d;
+
+ fps_caps_n = gst_value_get_fraction_numerator(framerate);
+ fps_caps_d = gst_value_get_fraction_denominator(framerate);
+
+ gst_structure_set(s, "framerate", GST_TYPE_FRACTION, fps_caps_n, fps_caps_d, nullptr);
+}
+
+#if !GST_CHECK_VERSION(1, 17, 1)
+gboolean
+gst_task_resume(GstTask *task)
{
/* We only want to resume the task if it's paused. */
GLibLocker lock(GST_OBJECT(task));
- if (GST_TASK_STATE(task) == GST_TASK_PAUSED) {
- GST_TASK_STATE(task) = GST_TASK_STARTED;
- GST_TASK_SIGNAL(task);
- }
+ if (GST_TASK_STATE(task) != GST_TASK_PAUSED)
+ return FALSE;
+
+ GST_TASK_STATE(task) = GST_TASK_STARTED;
+ GST_TASK_SIGNAL(task);
+ return TRUE;
}
+#endif
G_LOCK_DEFINE_STATIC(cm_singleton_lock);
static std::weak_ptr<CameraManager> cm_singleton_ptr;
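To make the clamping in gst_libcamera_clamp_and_set_frameduration() concrete, here is a worked example with hypothetical numbers (the actual limits come from the camera's FrameDurationLimits control):

    requested caps framerate : 120/1  ->  target_duration = 1000000 * 1 / 120 = 8333 us
    FrameDurationLimits      : min 33333 us, max 1000000 us
    clamped frame_duration   : 33333 us  ->  framerate_clamped = 1000000 / 33333 = 30
    result                   : downstream caps advertise framerate 30/1 and the
                               FrameDurationLimits control is set to { 33333, 33333 }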
diff --git a/src/gstreamer/gstlibcamera-utils.h b/src/gstreamer/gstlibcamera-utils.h
index d54f1588..cab1c814 100644
--- a/src/gstreamer/gstlibcamera-utils.h
+++ b/src/gstreamer/gstlibcamera-utils.h
@@ -3,12 +3,13 @@
* Copyright (C) 2020, Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
*
- * gstlibcamera-utils.h - GStreamer libcamera Utility Functions
+ * GStreamer libcamera Utility Functions
*/
#pragma once
#include <libcamera/camera_manager.h>
+#include <libcamera/controls.h>
#include <libcamera/stream.h>
#include <gst/gst.h>
@@ -18,7 +19,22 @@ GstCaps *gst_libcamera_stream_formats_to_caps(const libcamera::StreamFormats &fo
GstCaps *gst_libcamera_stream_configuration_to_caps(const libcamera::StreamConfiguration &stream_cfg);
void gst_libcamera_configure_stream_from_caps(libcamera::StreamConfiguration &stream_cfg,
GstCaps *caps);
-void gst_libcamera_resume_task(GstTask *task);
+void gst_libcamera_get_framerate_from_caps(GstCaps *caps, GstStructure *element_caps);
+void gst_libcamera_clamp_and_set_frameduration(libcamera::ControlList &controls,
+ const libcamera::ControlInfoMap &camera_controls,
+ GstStructure *element_caps);
+void gst_libcamera_framerate_to_caps(GstCaps *caps, const GstStructure *element_caps);
+
+#if !GST_CHECK_VERSION(1, 16, 0)
+static inline void gst_clear_event(GstEvent **event_ptr)
+{
+ g_clear_pointer(event_ptr, gst_mini_object_unref);
+}
+#endif
+
+#if !GST_CHECK_VERSION(1, 17, 1)
+gboolean gst_task_resume(GstTask *task);
+#endif
std::shared_ptr<libcamera::CameraManager> gst_libcamera_get_camera_manager(int &ret);
/**
diff --git a/src/gstreamer/gstlibcamera.cpp b/src/gstreamer/gstlibcamera.cpp
index 52388b5e..bff98979 100644
--- a/src/gstreamer/gstlibcamera.cpp
+++ b/src/gstreamer/gstlibcamera.cpp
@@ -3,7 +3,7 @@
* Copyright (C) 2019, Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
*
- * gstlibcamera.c - GStreamer plugin
+ * GStreamer plugin
*/
#include "gstlibcameraprovider.h"
diff --git a/src/gstreamer/gstlibcameraallocator.cpp b/src/gstreamer/gstlibcameraallocator.cpp
index c740b8fc..741ed592 100644
--- a/src/gstreamer/gstlibcameraallocator.cpp
+++ b/src/gstreamer/gstlibcameraallocator.cpp
@@ -3,7 +3,7 @@
* Copyright (C) 2020, Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
*
- * gstlibcameraallocator.cpp - GStreamer Custom Allocator
+ * GStreamer Custom Allocator
*/
#include "gstlibcameraallocator.h"
diff --git a/src/gstreamer/gstlibcameraallocator.h b/src/gstreamer/gstlibcameraallocator.h
index 0a08c3bb..1a6ba346 100644
--- a/src/gstreamer/gstlibcameraallocator.h
+++ b/src/gstreamer/gstlibcameraallocator.h
@@ -3,7 +3,7 @@
* Copyright (C) 2020, Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
*
- * gstlibcameraallocator.h - GStreamer Custom Allocator
+ * GStreamer Custom Allocator
*/
#pragma once
diff --git a/src/gstreamer/gstlibcamerapad.cpp b/src/gstreamer/gstlibcamerapad.cpp
index c00e81c8..7b22aebe 100644
--- a/src/gstreamer/gstlibcamerapad.cpp
+++ b/src/gstreamer/gstlibcamerapad.cpp
@@ -3,7 +3,7 @@
* Copyright (C) 2019, Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
*
- * gstlibcamerapad.cpp - GStreamer Capture Pad
+ * GStreamer Capture Pad
*/
#include "gstlibcamerapad.h"
@@ -18,7 +18,6 @@ struct _GstLibcameraPad {
GstPad parent;
StreamRole role;
GstLibcameraPool *pool;
- GQueue pending_buffers;
GstClockTime latency;
};
@@ -55,7 +54,7 @@ gst_libcamera_pad_get_property(GObject *object, guint prop_id, GValue *value,
switch (prop_id) {
case PROP_STREAM_ROLE:
- g_value_set_enum(value, self->role);
+ g_value_set_enum(value, static_cast<gint>(self->role));
break;
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID(object, prop_id, pspec);
@@ -88,9 +87,19 @@ gst_libcamera_stream_role_get_type()
{
static GType type = 0;
static const GEnumValue values[] = {
- { StillCapture, "libcamera::StillCapture", "still-capture" },
- { VideoRecording, "libcamera::VideoRecording", "video-recording" },
- { Viewfinder, "libcamera::Viewfinder", "view-finder" },
+ {
+ static_cast<gint>(StreamRole::StillCapture),
+ "libcamera::StillCapture",
+ "still-capture",
+ }, {
+ static_cast<gint>(StreamRole::VideoRecording),
+ "libcamera::VideoRecording",
+ "video-recording",
+ }, {
+ static_cast<gint>(StreamRole::Viewfinder),
+ "libcamera::Viewfinder",
+ "view-finder",
+ },
{ 0, NULL, NULL }
};
@@ -111,7 +120,7 @@ gst_libcamera_pad_class_init(GstLibcameraPadClass *klass)
auto *spec = g_param_spec_enum("stream-role", "Stream Role",
"The selected stream role",
gst_libcamera_stream_role_get_type(),
- VideoRecording,
+ static_cast<gint>(StreamRole::VideoRecording),
(GParamFlags)(GST_PARAM_MUTABLE_READY
| G_PARAM_CONSTRUCT
| G_PARAM_READWRITE
@@ -156,40 +165,6 @@ gst_libcamera_pad_get_stream(GstPad *pad)
}
void
-gst_libcamera_pad_queue_buffer(GstPad *pad, GstBuffer *buffer)
-{
- auto *self = GST_LIBCAMERA_PAD(pad);
- GLibLocker lock(GST_OBJECT(self));
-
- g_queue_push_head(&self->pending_buffers, buffer);
-}
-
-GstFlowReturn
-gst_libcamera_pad_push_pending(GstPad *pad)
-{
- auto *self = GST_LIBCAMERA_PAD(pad);
- GstBuffer *buffer;
-
- {
- GLibLocker lock(GST_OBJECT(self));
- buffer = GST_BUFFER(g_queue_pop_tail(&self->pending_buffers));
- }
-
- if (!buffer)
- return GST_FLOW_OK;
-
- return gst_pad_push(pad, buffer);
-}
-
-bool
-gst_libcamera_pad_has_pending(GstPad *pad)
-{
- auto *self = GST_LIBCAMERA_PAD(pad);
- GLibLocker lock(GST_OBJECT(self));
- return self->pending_buffers.length > 0;
-}
-
-void
gst_libcamera_pad_set_latency(GstPad *pad, GstClockTime latency)
{
auto *self = GST_LIBCAMERA_PAD(pad);
diff --git a/src/gstreamer/gstlibcamerapad.h b/src/gstreamer/gstlibcamerapad.h
index 20769517..630c168a 100644
--- a/src/gstreamer/gstlibcamerapad.h
+++ b/src/gstreamer/gstlibcamerapad.h
@@ -3,7 +3,7 @@
* Copyright (C) 2019, Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
*
- * gstlibcamerapad.h - GStreamer Capture Element
+ * GStreamer Capture Element
*/
#pragma once
@@ -25,10 +25,4 @@ void gst_libcamera_pad_set_pool(GstPad *pad, GstLibcameraPool *pool);
libcamera::Stream *gst_libcamera_pad_get_stream(GstPad *pad);
-void gst_libcamera_pad_queue_buffer(GstPad *pad, GstBuffer *buffer);
-
-GstFlowReturn gst_libcamera_pad_push_pending(GstPad *pad);
-
-bool gst_libcamera_pad_has_pending(GstPad *pad);
-
void gst_libcamera_pad_set_latency(GstPad *pad, GstClockTime latency);
diff --git a/src/gstreamer/gstlibcamerapool.cpp b/src/gstreamer/gstlibcamerapool.cpp
index 1fde4213..9661c67a 100644
--- a/src/gstreamer/gstlibcamerapool.cpp
+++ b/src/gstreamer/gstlibcamerapool.cpp
@@ -3,7 +3,7 @@
* Copyright (C) 2020, Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
*
- * gstlibcamerapool.cpp - GStreamer Buffer Pool
+ * GStreamer Buffer Pool
*/
#include "gstlibcamerapool.h"
@@ -134,13 +134,6 @@ gst_libcamera_pool_get_stream(GstLibcameraPool *self)
return self->stream;
}
-Stream *
-gst_libcamera_buffer_get_stream(GstBuffer *buffer)
-{
- auto *self = (GstLibcameraPool *)buffer->pool;
- return self->stream;
-}
-
FrameBuffer *
gst_libcamera_buffer_get_frame_buffer(GstBuffer *buffer)
{
diff --git a/src/gstreamer/gstlibcamerapool.h b/src/gstreamer/gstlibcamerapool.h
index 05795d21..2a7a9c77 100644
--- a/src/gstreamer/gstlibcamerapool.h
+++ b/src/gstreamer/gstlibcamerapool.h
@@ -3,7 +3,7 @@
* Copyright (C) 2020, Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
*
- * gstlibcamerapool.h - GStreamer Buffer Pool
+ * GStreamer Buffer Pool
*
* This is a partial implementation of GstBufferPool intended for internal use
* only. This pool cannot be configured or activated.
@@ -25,6 +25,4 @@ GstLibcameraPool *gst_libcamera_pool_new(GstLibcameraAllocator *allocator,
libcamera::Stream *gst_libcamera_pool_get_stream(GstLibcameraPool *self);
-libcamera::Stream *gst_libcamera_buffer_get_stream(GstBuffer *buffer);
-
libcamera::FrameBuffer *gst_libcamera_buffer_get_frame_buffer(GstBuffer *buffer);
diff --git a/src/gstreamer/gstlibcameraprovider.cpp b/src/gstreamer/gstlibcameraprovider.cpp
index 6eb0a0eb..4fb1b007 100644
--- a/src/gstreamer/gstlibcameraprovider.cpp
+++ b/src/gstreamer/gstlibcameraprovider.cpp
@@ -3,9 +3,11 @@
* Copyright (C) 2020, Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
*
- * gstlibcameraprovider.c - GStreamer Device Provider
+ * GStreamer Device Provider
*/
+#include <array>
+
#include "gstlibcameraprovider.h"
#include <libcamera/camera.h>
@@ -31,6 +33,7 @@ GST_DEBUG_CATEGORY_STATIC(provider_debug);
enum {
PROP_DEVICE_NAME = 1,
+ PROP_AUTO_FOCUS_MODE = 2,
};
#define GST_TYPE_LIBCAMERA_DEVICE gst_libcamera_device_get_type()
@@ -40,6 +43,7 @@ G_DECLARE_FINAL_TYPE(GstLibcameraDevice, gst_libcamera_device,
struct _GstLibcameraDevice {
GstDevice parent;
gchar *name;
+ controls::AfModeEnum auto_focus_mode = controls::AfModeManual;
};
G_DEFINE_TYPE(GstLibcameraDevice, gst_libcamera_device, GST_TYPE_DEVICE)
@@ -56,6 +60,7 @@ gst_libcamera_device_create_element(GstDevice *device, const gchar *name)
g_assert(source);
g_object_set(source, "camera-name", GST_LIBCAMERA_DEVICE(device)->name, nullptr);
+ g_object_set(source, "auto-focus-mode", GST_LIBCAMERA_DEVICE(device)->auto_focus_mode, nullptr);
return source;
}
@@ -82,6 +87,9 @@ gst_libcamera_device_set_property(GObject *object, guint prop_id,
case PROP_DEVICE_NAME:
device->name = g_value_dup_string(value);
break;
+ case PROP_AUTO_FOCUS_MODE:
+ device->auto_focus_mode = static_cast<controls::AfModeEnum>(g_value_get_enum(value));
+ break;
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID(object, prop_id, pspec);
break;
@@ -121,16 +129,24 @@ gst_libcamera_device_class_init(GstLibcameraDeviceClass *klass)
(GParamFlags)(G_PARAM_STATIC_STRINGS | G_PARAM_WRITABLE |
G_PARAM_CONSTRUCT_ONLY));
g_object_class_install_property(object_class, PROP_DEVICE_NAME, pspec);
+
+ pspec = g_param_spec_enum("auto-focus-mode",
+ "Set auto-focus mode",
+ "Available options: AfModeManual, "
+ "AfModeAuto or AfModeContinuous.",
+ gst_libcamera_auto_focus_get_type(),
+ static_cast<gint>(controls::AfModeManual),
+ G_PARAM_WRITABLE);
+ g_object_class_install_property(object_class, PROP_AUTO_FOCUS_MODE, pspec);
}
static GstDevice *
gst_libcamera_device_new(const std::shared_ptr<Camera> &camera)
{
+ static const std::array roles{ StreamRole::VideoRecording };
g_autoptr(GstCaps) caps = gst_caps_new_empty();
const gchar *name = camera->id().c_str();
- StreamRoles roles;
- roles.push_back(StreamRole::VideoRecording);
std::unique_ptr<CameraConfiguration> config = camera->generateConfiguration(roles);
if (!config || config->size() != roles.size()) {
GST_ERROR("Failed to generate a default configuration for %s", name);
diff --git a/src/gstreamer/gstlibcameraprovider.h b/src/gstreamer/gstlibcameraprovider.h
index aaceabf5..19708b9d 100644
--- a/src/gstreamer/gstlibcameraprovider.h
+++ b/src/gstreamer/gstlibcameraprovider.h
@@ -3,7 +3,7 @@
* Copyright (C) 2020, Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
*
- * gstlibcameraprovider.h - GStreamer Device Provider
+ * GStreamer Device Provider
*/
#pragma once
diff --git a/src/gstreamer/gstlibcamerasrc.cpp b/src/gstreamer/gstlibcamerasrc.cpp
index 46fd02d2..6a95b6af 100644
--- a/src/gstreamer/gstlibcamerasrc.cpp
+++ b/src/gstreamer/gstlibcamerasrc.cpp
@@ -3,16 +3,14 @@
* Copyright (C) 2019, Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
*
- * gstlibcamerasrc.cpp - GStreamer Capture Element
+ * GStreamer Capture Element
*/
/**
* \todo The following is a list of items that needs implementation in the GStreamer plugin
* - Implement GstElement::send_event
- * + Allowing application to send EOS
* + Allowing application to use FLUSH/FLUSH_STOP
* + Prevent the main thread from accessing streaming thread
- * - Implement renegotiation (even if slow)
* - Implement GstElement::request-new-pad (multi stream)
* + Evaluate if a single streaming thread is fine
* - Add application driven request (snapshot)
@@ -29,11 +27,13 @@
#include "gstlibcamerasrc.h"
+#include <atomic>
#include <queue>
#include <vector>
#include <libcamera/camera.h>
#include <libcamera/camera_manager.h>
+#include <libcamera/control_ids.h>
#include <gst/base/base.h>
@@ -51,15 +51,18 @@ struct RequestWrap {
RequestWrap(std::unique_ptr<Request> request);
~RequestWrap();
- void attachBuffer(GstBuffer *buffer);
+ void attachBuffer(Stream *stream, GstBuffer *buffer);
GstBuffer *detachBuffer(Stream *stream);
std::unique_ptr<Request> request_;
std::map<Stream *, GstBuffer *> buffers_;
+
+ GstClockTime latency_;
+ GstClockTime pts_;
};
RequestWrap::RequestWrap(std::unique_ptr<Request> request)
- : request_(std::move(request))
+ : request_(std::move(request)), latency_(0), pts_(GST_CLOCK_TIME_NONE)
{
}
@@ -71,10 +74,9 @@ RequestWrap::~RequestWrap()
}
}
-void RequestWrap::attachBuffer(GstBuffer *buffer)
+void RequestWrap::attachBuffer(Stream *stream, GstBuffer *buffer)
{
FrameBuffer *fb = gst_libcamera_buffer_get_frame_buffer(buffer);
- Stream *stream = gst_libcamera_buffer_get_stream(buffer);
request_->addBuffer(stream, fb);
@@ -107,11 +109,30 @@ struct GstLibcameraSrcState {
std::shared_ptr<CameraManager> cm_;
std::shared_ptr<Camera> cam_;
std::unique_ptr<CameraConfiguration> config_;
- std::vector<GstPad *> srcpads_;
- std::queue<std::unique_ptr<RequestWrap>> requests_;
+
+ std::vector<GstPad *> srcpads_; /* Protected by stream_lock */
+
+ /*
+ * Contention on this lock_ must be minimized, as it has to be taken in
+ * the realtime-sensitive requestCompleted() handler to protect
+ * queuedRequests_ and completedRequests_.
+ *
+ * stream_lock must be taken before lock_ in contexts where both locks
+ * need to be taken. In particular, this means that the lock_ must not
+ * be held while calling into other graph elements (e.g. when calling
+ * gst_pad_query()).
+ */
+ GMutex lock_;
+ std::queue<std::unique_ptr<RequestWrap>> queuedRequests_;
+ std::queue<std::unique_ptr<RequestWrap>> completedRequests_;
+
+ ControlList initControls_;
guint group_id_;
+ int queueRequest();
void requestCompleted(Request *request);
+ int processRequest();
+ void clearRequests();
};
struct _GstLibcameraSrc {
@@ -121,6 +142,9 @@ struct _GstLibcameraSrc {
GstTask *task;
gchar *camera_name;
+ controls::AfModeEnum auto_focus_mode = controls::AfModeManual;
+
+ std::atomic<GstEvent *> pending_eos;
GstLibcameraSrcState *state;
GstLibcameraAllocator *allocator;
@@ -129,14 +153,15 @@ struct _GstLibcameraSrc {
enum {
PROP_0,
- PROP_CAMERA_NAME
+ PROP_CAMERA_NAME,
+ PROP_AUTO_FOCUS_MODE,
};
G_DEFINE_TYPE_WITH_CODE(GstLibcameraSrc, gst_libcamera_src, GST_TYPE_ELEMENT,
GST_DEBUG_CATEGORY_INIT(source_debug, "libcamerasrc", 0,
"libcamera Source"))
-#define TEMPLATE_CAPS GST_STATIC_CAPS("video/x-raw; image/jpeg")
+#define TEMPLATE_CAPS GST_STATIC_CAPS("video/x-raw; image/jpeg; video/x-bayer")
/* For the simple case, we have a src pad that is always present. */
GstStaticPadTemplate src_template = {
@@ -148,15 +173,59 @@ GstStaticPadTemplate request_src_template = {
"src_%u", GST_PAD_SRC, GST_PAD_REQUEST, TEMPLATE_CAPS
};
+/* Must be called with stream_lock held. */
+int GstLibcameraSrcState::queueRequest()
+{
+ std::unique_ptr<Request> request = cam_->createRequest();
+ if (!request)
+ return -ENOMEM;
+
+ std::unique_ptr<RequestWrap> wrap =
+ std::make_unique<RequestWrap>(std::move(request));
+
+ for (GstPad *srcpad : srcpads_) {
+ Stream *stream = gst_libcamera_pad_get_stream(srcpad);
+ GstLibcameraPool *pool = gst_libcamera_pad_get_pool(srcpad);
+ GstBuffer *buffer;
+ GstFlowReturn ret;
+
+ ret = gst_buffer_pool_acquire_buffer(GST_BUFFER_POOL(pool),
+ &buffer, nullptr);
+ if (ret != GST_FLOW_OK) {
+ /*
+ * RequestWrap has ownership of the request, and we
+ * won't be queueing this one due to lack of buffers.
+ */
+ return -ENOBUFS;
+ }
+
+ wrap->attachBuffer(stream, buffer);
+ }
+
+ GST_TRACE_OBJECT(src_, "Requesting buffers");
+ cam_->queueRequest(wrap->request_.get());
+
+ {
+ GLibLocker locker(&lock_);
+ queuedRequests_.push(std::move(wrap));
+ }
+
+ /* The RequestWrap will be deleted in the completion handler. */
+ return 0;
+}
+
void
GstLibcameraSrcState::requestCompleted(Request *request)
{
- GLibLocker lock(GST_OBJECT(src_));
-
GST_DEBUG_OBJECT(src_, "buffers are ready");
- std::unique_ptr<RequestWrap> wrap = std::move(requests_.front());
- requests_.pop();
+ std::unique_ptr<RequestWrap> wrap;
+
+ {
+ GLibLocker locker(&lock_);
+ wrap = std::move(queuedRequests_.front());
+ queuedRequests_.pop();
+ }
g_return_if_fail(wrap->request_.get() == request);
@@ -165,23 +234,61 @@ GstLibcameraSrcState::requestCompleted(Request *request)
return;
}
- GstBuffer *buffer;
+ if (GST_ELEMENT_CLOCK(src_)) {
+ int64_t timestamp = request->metadata().get(controls::SensorTimestamp).value_or(0);
+
+ GstClockTime gst_base_time = GST_ELEMENT(src_)->base_time;
+ GstClockTime gst_now = gst_clock_get_time(GST_ELEMENT_CLOCK(src_));
+ /* \todo Need to expose which reference clock the timestamp relates to. */
+ GstClockTime sys_now = g_get_monotonic_time() * 1000;
+
+ /* Deduced from: sys_now - sys_base_time == gst_now - gst_base_time */
+ GstClockTime sys_base_time = sys_now - (gst_now - gst_base_time);
+ wrap->pts_ = timestamp - sys_base_time;
+ wrap->latency_ = sys_now - timestamp;
+ }
+
+ {
+ GLibLocker locker(&lock_);
+ completedRequests_.push(std::move(wrap));
+ }
+
+ gst_task_resume(src_->task);
+}
+
+/* Must be called with stream_lock held. */
+int GstLibcameraSrcState::processRequest()
+{
+ std::unique_ptr<RequestWrap> wrap;
+ int err = 0;
+
+ {
+ GLibLocker locker(&lock_);
+
+ if (!completedRequests_.empty()) {
+ wrap = std::move(completedRequests_.front());
+ completedRequests_.pop();
+ }
+
+ if (completedRequests_.empty())
+ err = -ENOBUFS;
+ }
+
+ if (!wrap)
+ return -ENOBUFS;
+
+ GstFlowReturn ret = GST_FLOW_OK;
+ gst_flow_combiner_reset(src_->flow_combiner);
+
for (GstPad *srcpad : srcpads_) {
Stream *stream = gst_libcamera_pad_get_stream(srcpad);
- buffer = wrap->detachBuffer(stream);
+ GstBuffer *buffer = wrap->detachBuffer(stream);
FrameBuffer *fb = gst_libcamera_buffer_get_frame_buffer(buffer);
- if (GST_ELEMENT_CLOCK(src_)) {
- GstClockTime gst_base_time = GST_ELEMENT(src_)->base_time;
- GstClockTime gst_now = gst_clock_get_time(GST_ELEMENT_CLOCK(src_));
- /* \todo Need to expose which reference clock the timestamp relates to. */
- GstClockTime sys_now = g_get_monotonic_time() * 1000;
-
- /* Deduced from: sys_now - sys_base_time == gst_now - gst_base_time */
- GstClockTime sys_base_time = sys_now - (gst_now - gst_base_time);
- GST_BUFFER_PTS(buffer) = fb->metadata().timestamp - sys_base_time;
- gst_libcamera_pad_set_latency(srcpad, sys_now - fb->metadata().timestamp);
+ if (GST_CLOCK_TIME_IS_VALID(wrap->pts_)) {
+ GST_BUFFER_PTS(buffer) = wrap->pts_;
+ gst_libcamera_pad_set_latency(srcpad, wrap->latency_);
} else {
GST_BUFFER_PTS(buffer) = 0;
}
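The PTS computation moved into requestCompleted() above derives the GStreamer running time from the sensor timestamp by reconstructing the system-clock base time. With hypothetical numbers, all in nanoseconds:

    sensor timestamp (SensorTimestamp)     : 100 000 000 000
    sys_now (monotonic clock, 50 ms later) : 100 050 000 000
    gst_base_time / gst_now                :   2 000 000 000 / 5 000 000 000
    sys_base_time = sys_now - (gst_now - gst_base_time)
                  = 100 050 000 000 - 3 000 000 000 = 97 050 000 000
    pts_     = timestamp - sys_base_time   =  2 950 000 000  (2.95 s running time)
    latency_ = sys_now - timestamp         =     50 000 000  (50 ms)

The buffer is therefore stamped 50 ms earlier than the current running time of 3 s, which matches the capture latency reported on the pad.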
@@ -189,10 +296,60 @@ GstLibcameraSrcState::requestCompleted(Request *request)
GST_BUFFER_OFFSET(buffer) = fb->metadata().sequence;
GST_BUFFER_OFFSET_END(buffer) = fb->metadata().sequence;
- gst_libcamera_pad_queue_buffer(srcpad, buffer);
+ ret = gst_pad_push(srcpad, buffer);
+ ret = gst_flow_combiner_update_pad_flow(src_->flow_combiner,
+ srcpad, ret);
}
- gst_libcamera_resume_task(this->src_->task);
+ switch (ret) {
+ case GST_FLOW_OK:
+ break;
+
+ case GST_FLOW_NOT_NEGOTIATED: {
+ bool reconfigure = false;
+ for (GstPad *srcpad : srcpads_) {
+ if (gst_pad_needs_reconfigure(srcpad)) {
+ reconfigure = true;
+ break;
+ }
+ }
+
+ /* If no pads need a reconfiguration something went wrong. */
+ if (!reconfigure)
+ err = -EPIPE;
+
+ break;
+ }
+
+ case GST_FLOW_EOS: {
+ g_autoptr(GstEvent) eos = gst_event_new_eos();
+ guint32 seqnum = gst_util_seqnum_next();
+ gst_event_set_seqnum(eos, seqnum);
+ for (GstPad *srcpad : srcpads_)
+ gst_pad_push_event(srcpad, gst_event_ref(eos));
+
+ err = -EPIPE;
+ break;
+ }
+
+ case GST_FLOW_FLUSHING:
+ err = -EPIPE;
+ break;
+
+ default:
+ GST_ELEMENT_FLOW_ERROR(src_, ret);
+
+ err = -EPIPE;
+ break;
+ }
+
+ return err;
+}
+
+void GstLibcameraSrcState::clearRequests()
+{
+ GLibLocker locker(&lock_);
+ completedRequests_ = {};
}
static bool
@@ -220,10 +377,10 @@ gst_libcamera_src_open(GstLibcameraSrc *self)
}
if (camera_name) {
- cam = cm->get(self->camera_name);
+ cam = cm->get(camera_name);
if (!cam) {
GST_ELEMENT_ERROR(self, RESOURCE, NOT_FOUND,
- ("Could not find a camera named '%s'.", self->camera_name),
+ ("Could not find a camera named '%s'.", camera_name),
("libcamera::CameraMananger::get() returned nullptr"));
return false;
}
@@ -256,93 +413,195 @@ gst_libcamera_src_open(GstLibcameraSrc *self)
return true;
}
-static void
-gst_libcamera_src_task_run(gpointer user_data)
+/* Must be called with stream_lock held. */
+static bool
+gst_libcamera_src_negotiate(GstLibcameraSrc *self)
{
- GstLibcameraSrc *self = GST_LIBCAMERA_SRC(user_data);
GstLibcameraSrcState *state = self->state;
- std::unique_ptr<Request> request = state->cam_->createRequest();
- if (!request) {
+ g_autoptr(GstStructure) element_caps = gst_structure_new_empty("caps");
+
+ for (gsize i = 0; i < state->srcpads_.size(); i++) {
+ GstPad *srcpad = state->srcpads_[i];
+ StreamConfiguration &stream_cfg = state->config_->at(i);
+
+ /* Retrieve the supported caps. */
+ g_autoptr(GstCaps) filter = gst_libcamera_stream_formats_to_caps(stream_cfg.formats());
+ g_autoptr(GstCaps) caps = gst_pad_peer_query_caps(srcpad, filter);
+ if (gst_caps_is_empty(caps))
+ return false;
+
+ /* Fixate caps and configure the stream. */
+ caps = gst_caps_make_writable(caps);
+ gst_libcamera_configure_stream_from_caps(stream_cfg, caps);
+ gst_libcamera_get_framerate_from_caps(caps, element_caps);
+ }
+
+ /* Validate the configuration. */
+ if (state->config_->validate() == CameraConfiguration::Invalid)
+ return false;
+
+ int ret = state->cam_->configure(state->config_.get());
+ if (ret) {
+ GST_ELEMENT_ERROR(self, RESOURCE, SETTINGS,
+ ("Failed to configure camera: %s", g_strerror(-ret)),
+ ("Camera::configure() failed with error code %i", ret));
+ return false;
+ }
+
+ /* Check frame duration bounds within controls::FrameDurationLimits */
+ gst_libcamera_clamp_and_set_frameduration(state->initControls_,
+ state->cam_->controls(), element_caps);
+
+ /*
+ * Regardless if it has been modified, create clean caps and push the
+ * caps event. Downstream will decide if the caps are acceptable.
+ */
+ for (gsize i = 0; i < state->srcpads_.size(); i++) {
+ GstPad *srcpad = state->srcpads_[i];
+ const StreamConfiguration &stream_cfg = state->config_->at(i);
+
+ g_autoptr(GstCaps) caps = gst_libcamera_stream_configuration_to_caps(stream_cfg);
+ gst_libcamera_framerate_to_caps(caps, element_caps);
+
+ if (!gst_pad_push_event(srcpad, gst_event_new_caps(caps)))
+ return false;
+ }
+
+ if (self->allocator)
+ g_clear_object(&self->allocator);
+
+ self->allocator = gst_libcamera_allocator_new(state->cam_, state->config_.get());
+ if (!self->allocator) {
GST_ELEMENT_ERROR(self, RESOURCE, NO_SPACE_LEFT,
- ("Failed to allocate request for camera '%s'.",
- state->cam_->id().c_str()),
- ("libcamera::Camera::createRequest() failed"));
- gst_task_stop(self->task);
- return;
+ ("Failed to allocate memory"),
+ ("gst_libcamera_allocator_new() failed."));
+ return false;
}
- std::unique_ptr<RequestWrap> wrap =
- std::make_unique<RequestWrap>(std::move(request));
+ for (gsize i = 0; i < state->srcpads_.size(); i++) {
+ GstPad *srcpad = state->srcpads_[i];
+ const StreamConfiguration &stream_cfg = state->config_->at(i);
- for (GstPad *srcpad : state->srcpads_) {
- GstLibcameraPool *pool = gst_libcamera_pad_get_pool(srcpad);
- GstBuffer *buffer;
- GstFlowReturn ret;
+ GstLibcameraPool *pool = gst_libcamera_pool_new(self->allocator,
+ stream_cfg.stream());
+ g_signal_connect_swapped(pool, "buffer-notify",
+ G_CALLBACK(gst_task_resume), self->task);
- ret = gst_buffer_pool_acquire_buffer(GST_BUFFER_POOL(pool),
- &buffer, nullptr);
- if (ret != GST_FLOW_OK) {
- /*
- * RequestWrap has ownership of the request, and we
- * won't be queueing this one due to lack of buffers.
- */
- wrap.release();
- break;
- }
+ gst_libcamera_pad_set_pool(srcpad, pool);
- wrap->attachBuffer(buffer);
+ /* Clear all reconfigure flags. */
+ gst_pad_check_reconfigure(srcpad);
}
- if (wrap) {
- GLibLocker lock(GST_OBJECT(self));
- GST_TRACE_OBJECT(self, "Requesting buffers");
- state->cam_->queueRequest(wrap->request_.get());
- state->requests_.push(std::move(wrap));
+ return true;
+}
+
+static void
+gst_libcamera_src_task_run(gpointer user_data)
+{
+ GstLibcameraSrc *self = GST_LIBCAMERA_SRC(user_data);
+ GstLibcameraSrcState *state = self->state;
+
+ /*
+ * Start by pausing the task. The task may also get resumed by the
+ * buffer-notify signal when new buffers are queued back to the pool,
+ * or by the request completion handler when a new request has
+ * completed. Both will resume the task after adding the buffers or
+ * request to their respective lists, which are checked below to decide
+ * if the task needs to be resumed for another iteration. This is thus
+ * guaranteed to be race-free, the lock taken by gst_task_pause() and
+ * gst_task_resume() serves as a memory barrier.
+ */
+ gst_task_pause(self->task);
+
+ bool doResume = false;
- /* The RequestWrap will be deleted in the completion handler. */
+ g_autoptr(GstEvent) event = self->pending_eos.exchange(nullptr);
+ if (event) {
+ for (GstPad *srcpad : state->srcpads_)
+ gst_pad_push_event(srcpad, gst_event_ref(event));
+
+ return;
}
- GstFlowReturn ret = GST_FLOW_OK;
- gst_flow_combiner_reset(self->flow_combiner);
+ /* Check if a srcpad requested a renegotiation. */
+ bool reconfigure = false;
for (GstPad *srcpad : state->srcpads_) {
- ret = gst_libcamera_pad_push_pending(srcpad);
- ret = gst_flow_combiner_update_pad_flow(self->flow_combiner,
- srcpad, ret);
+ if (gst_pad_check_reconfigure(srcpad)) {
+ /* Check if the caps even need changing. */
+ g_autoptr(GstCaps) caps = gst_pad_get_current_caps(srcpad);
+ if (!gst_pad_peer_query_accept_caps(srcpad, caps)) {
+ reconfigure = true;
+ break;
+ }
+ }
}
- {
- if (ret != GST_FLOW_OK) {
- if (ret == GST_FLOW_EOS) {
- g_autoptr(GstEvent) eos = gst_event_new_eos();
- guint32 seqnum = gst_util_seqnum_next();
- gst_event_set_seqnum(eos, seqnum);
- for (GstPad *srcpad : state->srcpads_)
- gst_pad_push_event(srcpad, gst_event_ref(eos));
- } else if (ret != GST_FLOW_FLUSHING) {
- GST_ELEMENT_FLOW_ERROR(self, ret);
- }
+ if (reconfigure) {
+ state->cam_->stop();
+ state->clearRequests();
+
+ if (!gst_libcamera_src_negotiate(self)) {
+ GST_ELEMENT_FLOW_ERROR(self, GST_FLOW_NOT_NEGOTIATED);
gst_task_stop(self->task);
- return;
}
+ state->cam_->start(&state->initControls_);
+ }
+
+ /*
+ * Create and queue one request. If no buffers are available the
+ * function returns -ENOBUFS, which we ignore here as that's not a
+ * fatal error.
+ */
+ int ret = state->queueRequest();
+ switch (ret) {
+ case 0:
/*
- * Here we need to decide if we want to pause. This needs to
- * happen in lock step with the callback thread which may want
- * to resume the task and might push pending buffers.
+ * The request was successfully queued, there may be enough
+ * buffers to create a new one. Don't pause the task to give it
+ * another try.
*/
- GLibLocker lock(GST_OBJECT(self));
- bool do_pause = true;
- for (GstPad *srcpad : state->srcpads_) {
- if (gst_libcamera_pad_has_pending(srcpad)) {
- do_pause = false;
- break;
- }
- }
+ doResume = true;
+ break;
- if (do_pause)
- gst_task_pause(self->task);
+ case -ENOMEM:
+ GST_ELEMENT_ERROR(self, RESOURCE, NO_SPACE_LEFT,
+ ("Failed to allocate request for camera '%s'.",
+ state->cam_->id().c_str()),
+ ("libcamera::Camera::createRequest() failed"));
+ gst_task_stop(self->task);
+ return;
+
+ case -ENOBUFS:
+ default:
+ break;
}
+
+ /*
+ * Process one completed request, if available, and record if further
+ * requests are ready for processing.
+ */
+ ret = state->processRequest();
+ switch (ret) {
+ case 0:
+ /* Another completed request is available, resume the task. */
+ doResume = true;
+ break;
+
+ case -EPIPE:
+ gst_task_stop(self->task);
+ return;
+
+ case -ENOBUFS:
+ default:
+ break;
+ }
+
+ /* Resume the task for another iteration if needed. */
+ if (doResume)
+ gst_task_resume(self->task);
}
static void
@@ -352,13 +611,12 @@ gst_libcamera_src_task_enter(GstTask *task, [[maybe_unused]] GThread *thread,
GstLibcameraSrc *self = GST_LIBCAMERA_SRC(user_data);
GLibRecLocker lock(&self->stream_lock);
GstLibcameraSrcState *state = self->state;
- GstFlowReturn flow_ret = GST_FLOW_OK;
gint ret;
GST_DEBUG_OBJECT(self, "Streaming thread has started");
gint stream_id_num = 0;
- StreamRoles roles;
+ std::vector<StreamRole> roles;
for (GstPad *srcpad : state->srcpads_) {
/* Create stream-id and push stream-start. */
g_autofree gchar *stream_id_intermediate = g_strdup_printf("%i%i", state->group_id_, stream_id_num++);
@@ -382,45 +640,16 @@ gst_libcamera_src_task_enter(GstTask *task, [[maybe_unused]] GThread *thread,
}
g_assert(state->config_->size() == state->srcpads_.size());
- for (gsize i = 0; i < state->srcpads_.size(); i++) {
- GstPad *srcpad = state->srcpads_[i];
- StreamConfiguration &stream_cfg = state->config_->at(i);
-
- /* Retrieve the supported caps. */
- g_autoptr(GstCaps) filter = gst_libcamera_stream_formats_to_caps(stream_cfg.formats());
- g_autoptr(GstCaps) caps = gst_pad_peer_query_caps(srcpad, filter);
- if (gst_caps_is_empty(caps)) {
- flow_ret = GST_FLOW_NOT_NEGOTIATED;
- break;
- }
-
- /* Fixate caps and configure the stream. */
- caps = gst_caps_make_writable(caps);
- gst_libcamera_configure_stream_from_caps(stream_cfg, caps);
- }
-
- if (flow_ret != GST_FLOW_OK)
- goto done;
-
- /* Validate the configuration. */
- if (state->config_->validate() == CameraConfiguration::Invalid) {
- flow_ret = GST_FLOW_NOT_NEGOTIATED;
- goto done;
+ if (!gst_libcamera_src_negotiate(self)) {
+ state->initControls_.clear();
+ GST_ELEMENT_FLOW_ERROR(self, GST_FLOW_NOT_NEGOTIATED);
+ gst_task_stop(task);
+ return;
}
- /*
- * Regardless if it has been modified, create clean caps and push the
- * caps event. Downstream will decide if the caps are acceptable.
- */
- for (gsize i = 0; i < state->srcpads_.size(); i++) {
- GstPad *srcpad = state->srcpads_[i];
- const StreamConfiguration &stream_cfg = state->config_->at(i);
-
- g_autoptr(GstCaps) caps = gst_libcamera_stream_configuration_to_caps(stream_cfg);
- if (!gst_pad_push_event(srcpad, gst_event_new_caps(caps))) {
- flow_ret = GST_FLOW_NOT_NEGOTIATED;
- break;
- }
+ self->flow_combiner = gst_flow_combiner_new();
+ for (GstPad *srcpad : state->srcpads_) {
+ gst_flow_combiner_add_pad(self->flow_combiner, srcpad);
/* Send an open segment event with time format. */
GstSegment segment;
@@ -428,38 +657,19 @@ gst_libcamera_src_task_enter(GstTask *task, [[maybe_unused]] GThread *thread,
gst_pad_push_event(srcpad, gst_event_new_segment(&segment));
}
- ret = state->cam_->configure(state->config_.get());
- if (ret) {
- GST_ELEMENT_ERROR(self, RESOURCE, SETTINGS,
- ("Failed to configure camera: %s", g_strerror(-ret)),
- ("Camera::configure() failed with error code %i", ret));
- gst_task_stop(task);
- return;
- }
-
- self->allocator = gst_libcamera_allocator_new(state->cam_, state->config_.get());
- if (!self->allocator) {
- GST_ELEMENT_ERROR(self, RESOURCE, NO_SPACE_LEFT,
- ("Failed to allocate memory"),
- ("gst_libcamera_allocator_new() failed."));
- gst_task_stop(task);
- return;
- }
-
- self->flow_combiner = gst_flow_combiner_new();
- for (gsize i = 0; i < state->srcpads_.size(); i++) {
- GstPad *srcpad = state->srcpads_[i];
- const StreamConfiguration &stream_cfg = state->config_->at(i);
- GstLibcameraPool *pool = gst_libcamera_pool_new(self->allocator,
- stream_cfg.stream());
- g_signal_connect_swapped(pool, "buffer-notify",
- G_CALLBACK(gst_libcamera_resume_task), task);
-
- gst_libcamera_pad_set_pool(srcpad, pool);
- gst_flow_combiner_add_pad(self->flow_combiner, srcpad);
+ if (self->auto_focus_mode != controls::AfModeManual) {
+ const ControlInfoMap &infoMap = state->cam_->controls();
+ if (infoMap.find(&controls::AfMode) != infoMap.end()) {
+ state->initControls_.set(controls::AfMode, self->auto_focus_mode);
+ } else {
+ GST_ELEMENT_ERROR(self, RESOURCE, SETTINGS,
+ ("Failed to enable auto focus"),
+ ("AfMode not supported by this camera, "
+ "please retry with 'auto-focus-mode=AfModeManual'"));
+ }
}
- ret = state->cam_->start();
+ ret = state->cam_->start(&state->initControls_);
if (ret) {
GST_ELEMENT_ERROR(self, RESOURCE, SETTINGS,
("Failed to start the camera: %s", g_strerror(-ret)),
@@ -467,16 +677,6 @@ gst_libcamera_src_task_enter(GstTask *task, [[maybe_unused]] GThread *thread,
gst_task_stop(task);
return;
}
-
-done:
- switch (flow_ret) {
- case GST_FLOW_NOT_NEGOTIATED:
- GST_ELEMENT_FLOW_ERROR(self, flow_ret);
- gst_task_stop(task);
- break;
- default:
- break;
- }
}
static void
@@ -490,9 +690,13 @@ gst_libcamera_src_task_leave([[maybe_unused]] GstTask *task,
GST_DEBUG_OBJECT(self, "Streaming thread is about to stop");
state->cam_->stop();
+ state->clearRequests();
- for (GstPad *srcpad : state->srcpads_)
- gst_libcamera_pad_set_pool(srcpad, nullptr);
+ {
+ GLibRecLocker locker(&self->stream_lock);
+ for (GstPad *srcpad : state->srcpads_)
+ gst_libcamera_pad_set_pool(srcpad, nullptr);
+ }
g_clear_object(&self->allocator);
g_clear_pointer(&self->flow_combiner,
@@ -532,6 +736,9 @@ gst_libcamera_src_set_property(GObject *object, guint prop_id,
g_free(self->camera_name);
self->camera_name = g_value_dup_string(value);
break;
+ case PROP_AUTO_FOCUS_MODE:
+ self->auto_focus_mode = static_cast<controls::AfModeEnum>(g_value_get_enum(value));
+ break;
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID(object, prop_id, pspec);
break;
@@ -549,6 +756,9 @@ gst_libcamera_src_get_property(GObject *object, guint prop_id, GValue *value,
case PROP_CAMERA_NAME:
g_value_set_string(value, self->camera_name);
break;
+ case PROP_AUTO_FOCUS_MODE:
+ g_value_set_enum(value, static_cast<gint>(self->auto_focus_mode));
+ break;
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID(object, prop_id, pspec);
break;
@@ -604,6 +814,27 @@ gst_libcamera_src_change_state(GstElement *element, GstStateChange transition)
return ret;
}
+static gboolean
+gst_libcamera_src_send_event(GstElement *element, GstEvent *event)
+{
+ GstLibcameraSrc *self = GST_LIBCAMERA_SRC(element);
+ gboolean ret = FALSE;
+
+ switch (GST_EVENT_TYPE(event)) {
+ case GST_EVENT_EOS: {
+ GstEvent *oldEvent = self->pending_eos.exchange(event);
+ gst_clear_event(&oldEvent);
+ ret = TRUE;
+ break;
+ }
+ default:
+ gst_event_unref(event);
+ break;
+ }
+
+ return ret;
+}
+
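
The send_event() handler above only parks the EOS event: pending_eos.exchange() publishes the new event atomically and returns whatever was pending before, so the old event can be dropped without taking a lock. The streaming thread is then expected to drain that slot; a minimal sketch of such a consumer, assuming the same pending_eos member (the consuming side is not shown in this hunk, so this is a fragment meant for the body of the streaming task):

/* Sketch only: draining the parked EOS event from the streaming thread. */
GstEvent *event = self->pending_eos.exchange(nullptr);
if (event) {
	for (GstPad *srcpad : state->srcpads_)
		gst_pad_push_event(srcpad, gst_event_ref(event));
	gst_event_unref(event);
	gst_task_stop(self->task);
}
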
static void
gst_libcamera_src_finalize(GObject *object)
{
@@ -612,6 +843,7 @@ gst_libcamera_src_finalize(GObject *object)
g_rec_mutex_clear(&self->stream_lock);
g_clear_object(&self->task);
+ g_mutex_clear(&self->state->lock_);
g_free(self->camera_name);
delete self->state;
@@ -630,8 +862,12 @@ gst_libcamera_src_init(GstLibcameraSrc *self)
gst_task_set_leave_callback(self->task, gst_libcamera_src_task_leave, self, nullptr);
gst_task_set_lock(self->task, &self->stream_lock);
+ g_mutex_init(&state->lock_);
+
state->srcpads_.push_back(gst_pad_new_from_template(templ, "src"));
- gst_element_add_pad(GST_ELEMENT(self), state->srcpads_[0]);
+ gst_element_add_pad(GST_ELEMENT(self), state->srcpads_.back());
+
+ GST_OBJECT_FLAG_SET(self, GST_ELEMENT_FLAG_SOURCE);
/* C-style friend. */
state->src_ = self;
@@ -651,7 +887,7 @@ gst_libcamera_src_request_new_pad(GstElement *element, GstPadTemplate *templ,
g_object_ref_sink(pad);
if (gst_element_add_pad(element, pad)) {
- GLibLocker lock(GST_OBJECT(self));
+ GLibRecLocker lock(&self->stream_lock);
self->state->srcpads_.push_back(reinterpret_cast<GstPad *>(g_object_ref(pad)));
} else {
GST_ELEMENT_ERROR(element, STREAM, FAILED,
@@ -671,7 +907,7 @@ gst_libcamera_src_release_pad(GstElement *element, GstPad *pad)
GST_DEBUG_OBJECT(self, "Pad %" GST_PTR_FORMAT " being released", pad);
{
- GLibLocker lock(GST_OBJECT(self));
+ GLibRecLocker lock(&self->stream_lock);
std::vector<GstPad *> &pads = self->state->srcpads_;
auto begin_iterator = pads.begin();
auto end_iterator = pads.end();
@@ -698,6 +934,7 @@ gst_libcamera_src_class_init(GstLibcameraSrcClass *klass)
element_class->request_new_pad = gst_libcamera_src_request_new_pad;
element_class->release_pad = gst_libcamera_src_release_pad;
element_class->change_state = gst_libcamera_src_change_state;
+ element_class->send_event = gst_libcamera_src_send_event;
gst_element_class_set_metadata(element_class,
"libcamera Source", "Source/Video",
@@ -717,4 +954,13 @@ gst_libcamera_src_class_init(GstLibcameraSrcClass *klass)
| G_PARAM_READWRITE
| G_PARAM_STATIC_STRINGS));
g_object_class_install_property(object_class, PROP_CAMERA_NAME, spec);
+
+ spec = g_param_spec_enum("auto-focus-mode",
+ "Set auto-focus mode",
+ "Available options: AfModeManual, "
+ "AfModeAuto or AfModeContinuous.",
+ gst_libcamera_auto_focus_get_type(),
+ static_cast<gint>(controls::AfModeManual),
+ G_PARAM_WRITABLE);
+ g_object_class_install_property(object_class, PROP_AUTO_FOCUS_MODE, spec);
}
diff --git a/src/gstreamer/gstlibcamerasrc.h b/src/gstreamer/gstlibcamerasrc.h
index fdea2f10..fd1f8193 100644
--- a/src/gstreamer/gstlibcamerasrc.h
+++ b/src/gstreamer/gstlibcamerasrc.h
@@ -3,11 +3,13 @@
* Copyright (C) 2019, Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
*
- * gstlibcamerasrc.h - GStreamer Capture Element
+ * GStreamer Capture Element
*/
#pragma once
+#include <libcamera/control_ids.h>
+
#include <gst/gst.h>
G_BEGIN_DECLS
@@ -17,3 +19,32 @@ G_DECLARE_FINAL_TYPE(GstLibcameraSrc, gst_libcamera_src,
GST_LIBCAMERA, SRC, GstElement)
G_END_DECLS
+
+inline GType
+gst_libcamera_auto_focus_get_type()
+{
+ static GType type = 0;
+ static const GEnumValue values[] = {
+ {
+ static_cast<gint>(libcamera::controls::AfModeManual),
+ "AfModeManual",
+ "manual-focus",
+ },
+ {
+ static_cast<gint>(libcamera::controls::AfModeAuto),
+ "AfModeAuto",
+ "automatic-auto-focus",
+ },
+ {
+ static_cast<gint>(libcamera::controls::AfModeContinuous),
+ "AfModeContinuous",
+ "continuous-auto-focus",
+ },
+ { 0, NULL, NULL }
+ };
+
+ if (!type)
+ type = g_enum_register_static("GstLibcameraAutoFocus", values);
+
+ return type;
+}
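
With the enum registered above, the new property behaves like any other GObject enum property. A minimal sketch of selecting continuous autofocus from application code (everything outside the element itself is illustrative):

/*
 * Sketch only: selecting continuous autofocus on the source element.
 * Assumes gst_init() has already run; pipeline construction is omitted.
 */
#include <gst/gst.h>

#include <libcamera/control_ids.h>

static GstElement *make_libcamerasrc_with_caf()
{
	GstElement *src = gst_element_factory_make("libcamerasrc", nullptr);
	if (!src)
		return nullptr;

	g_object_set(src, "auto-focus-mode",
		     static_cast<gint>(libcamera::controls::AfModeContinuous),
		     nullptr);

	return src;
}

From the command line, passing the value name should work as well, for example auto-focus-mode=AfModeContinuous in a gst-launch-1.0 pipeline.
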
diff --git a/src/gstreamer/meson.build b/src/gstreamer/meson.build
index 77c79140..c2a01e7b 100644
--- a/src/gstreamer/meson.build
+++ b/src/gstreamer/meson.build
@@ -43,6 +43,18 @@ libcamera_gst = shared_library('gstlibcamera',
libcamera_gst_sources,
cpp_args : libcamera_gst_cpp_args,
dependencies : [libcamera_public, gstvideo_dep, gstallocator_dep],
- install: true,
+ install : true,
install_dir : '@0@/gstreamer-1.0'.format(get_option('libdir')),
)
+
+# Make the plugin visible to GStreamer inside meson devenv.
+fs = import('fs')
+gst_plugin_path = fs.parent(libcamera_gst.full_path())
+
+gst_env = environment()
+gst_env.prepend('GST_PLUGIN_PATH', gst_plugin_path)
+
+# Avoid polluting the system registry.
+gst_env.set('GST_REGISTRY', gst_plugin_path / 'registry.data')
+
+meson.add_devenv(gst_env)
diff --git a/src/ipa/ipa-sign-install.sh b/src/ipa/ipa-sign-install.sh
index bcedb8b5..71696d5a 100755
--- a/src/ipa/ipa-sign-install.sh
+++ b/src/ipa/ipa-sign-install.sh
@@ -4,7 +4,7 @@
#
# Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
#
-# ipa-sign-install.sh - Regenerate IPA module signatures when installing
+# Regenerate IPA module signatures when installing
key=$1
shift
diff --git a/src/ipa/ipa-sign.sh b/src/ipa/ipa-sign.sh
index 8673dad1..69024213 100755
--- a/src/ipa/ipa-sign.sh
+++ b/src/ipa/ipa-sign.sh
@@ -4,7 +4,7 @@
#
# Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
#
-# ipa-sign.sh - Generate a signature for an IPA module
+# Generate a signature for an IPA module
key="$1"
input="$2"
diff --git a/src/ipa/ipu3/algorithms/af.cpp b/src/ipa/ipu3/algorithms/af.cpp
index d07521a0..29eb7355 100644
--- a/src/ipa/ipu3/algorithms/af.cpp
+++ b/src/ipa/ipu3/algorithms/af.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Red Hat
*
- * af.cpp - IPU3 auto focus algorithm
+ * IPU3 auto focus algorithm
*/
#include "af.h"
@@ -114,19 +114,6 @@ Af::Af()
}
/**
- * \copydoc libcamera::ipa::Algorithm::prepare
- */
-void Af::prepare(IPAContext &context, ipu3_uapi_params *params)
-{
- const struct ipu3_uapi_grid_config &grid = context.configuration.af.afGrid;
- params->acc_param.af.grid_cfg = grid;
- params->acc_param.af.filter_config = afFilterConfigDefault;
-
- /* Enable AF processing block */
- params->use.acc_af = 1;
-}
-
-/**
* \brief Configure the Af given a configInfo
* \param[in] context The shared IPA context
* \param[in] configInfo The IPA configuration data
@@ -195,11 +182,27 @@ int Af::configure(IPAContext &context, const IPAConfigInfo &configInfo)
}
/**
+ * \copydoc libcamera::ipa::Algorithm::prepare
+ */
+void Af::prepare(IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ [[maybe_unused]] IPAFrameContext &frameContext,
+ ipu3_uapi_params *params)
+{
+ const struct ipu3_uapi_grid_config &grid = context.configuration.af.afGrid;
+ params->acc_param.af.grid_cfg = grid;
+ params->acc_param.af.filter_config = afFilterConfigDefault;
+
+ /* Enable AF processing block */
+ params->use.acc_af = 1;
+}
+
+/**
* \brief AF coarse scan
- *
- * Find a near focused image using a coarse step. The step is determined by coarseSearchStep.
- *
* \param[in] context The shared IPA context
+ *
+ * Find a near focused image using a coarse step. The step is determined by
+ * kCoarseSearchStep.
*/
void Af::afCoarseScan(IPAContext &context)
{
@@ -223,10 +226,9 @@ void Af::afCoarseScan(IPAContext &context)
/**
* \brief AF fine scan
+ * \param[in] context The shared IPA context
*
 * Find an optimum lens position by moving one step per search iteration.
- *
- * \param[in] context The shared IPA context
*/
void Af::afFineScan(IPAContext &context)
{
@@ -244,10 +246,9 @@ void Af::afFineScan(IPAContext &context)
/**
* \brief AF reset
+ * \param[in] context The shared IPA context
*
* Reset all the parameters to start over the AF process.
- *
- * \param[in] context The shared IPA context
*/
void Af::afReset(IPAContext &context)
{
@@ -266,9 +267,9 @@ void Af::afReset(IPAContext &context)
}
/**
- * \brief AF variance comparison.
+ * \brief AF variance comparison
* \param[in] context The IPA context
- * \param min_step The VCM movement step.
+ * \param[in] min_step The VCM movement step
*
* We always pick the largest variance to replace the previous one. The image
* with a larger variance also indicates it is a clearer image than previous
@@ -321,7 +322,7 @@ bool Af::afScan(IPAContext &context, int min_step)
}
/**
- * \brief Determine the frame to be ignored.
+ * \brief Determine the frame to be ignored
* \return Return True if the frame should be ignored, false otherwise
*/
bool Af::afNeedIgnoreFrame()
@@ -334,7 +335,7 @@ bool Af::afNeedIgnoreFrame()
}
/**
- * \brief Reset frame ignore counter.
+ * \brief Reset frame ignore counter
*/
void Af::afIgnoreFrameReset()
{
@@ -343,9 +344,8 @@ void Af::afIgnoreFrameReset()
/**
* \brief Estimate variance
- * \param y_item The AF filter data set from the IPU3 statistics buffer
- * \param len The quantity of table item entries which are valid to process
- * \param isY1 Selects between filter Y1 or Y2 to calculate the variance
+ * \param[in] y_items The AF filter data set from the IPU3 statistics buffer
+ * \param[in] isY1 Selects between filter Y1 or Y2 to calculate the variance
*
 * Calculate the mean of the data set provided by \a y_items, and then calculate
* the variance of that data set from the mean.
@@ -377,16 +377,16 @@ double Af::afEstimateVariance(Span<const y_table_item_t> y_items, bool isY1)
}
/**
- * \brief Determine out-of-focus situation.
- * \param context The IPA context.
+ * \brief Determine out-of-focus situation
+ * \param[in] context The IPA context
*
* Out-of-focus means that the variance change rate for a focused and a new
* variance is greater than a threshold.
*
* \return True if the variance threshold is crossed indicating lost focus,
- * false otherwise.
+ * false otherwise
*/
-bool Af::afIsOutOfFocus(IPAContext context)
+bool Af::afIsOutOfFocus(IPAContext &context)
{
const uint32_t diff_var = std::abs(currentVariance_ -
context.activeState.af.maxVariance);
@@ -404,10 +404,12 @@ bool Af::afIsOutOfFocus(IPAContext context)
}
/**
- * \brief Determine the max contrast image and lens position.
- * \param[in] context The IPA context.
+ * \brief Determine the max contrast image and lens position
+ * \param[in] context The IPA context
+ * \param[in] frame The frame context sequence number
* \param[in] frameContext The current frame context
- * \param[in] stats The statistics buffer of IPU3.
+ * \param[in] stats The statistics buffer of IPU3
+ * \param[out] metadata Metadata for the frame, to be filled by the algorithm
*
* Ideally, a clear image also has a relatively higher contrast. So, every
* image for each focus step should be tested to find an optimal focus step.
@@ -420,8 +422,10 @@ bool Af::afIsOutOfFocus(IPAContext context)
*
* [1] Hill Climbing Algorithm, https://en.wikipedia.org/wiki/Hill_climbing
*/
-void Af::process(IPAContext &context, [[maybe_unused]] IPAFrameContext *frameContext,
- const ipu3_uapi_stats_3a *stats)
+void Af::process(IPAContext &context, [[maybe_unused]] const uint32_t frame,
+ [[maybe_unused]] IPAFrameContext &frameContext,
+ const ipu3_uapi_stats_3a *stats,
+ [[maybe_unused]] ControlList &metadata)
{
/* Evaluate the AF buffer length */
uint32_t afRawBufferLen = context.configuration.af.afGrid.width *
@@ -450,6 +454,8 @@ void Af::process(IPAContext &context, [[maybe_unused]] IPAFrameContext *frameCon
}
}
+REGISTER_IPA_ALGORITHM(Af, "Af")
+
} /* namespace ipa::ipu3::algorithms */
} /* namespace libcamera */
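
The scan logic above keys entirely off a contrast figure: the population variance of the AF filter output, computed from its mean. A reduced sketch of that metric on a flat array of samples (the real code walks y_table_item_t entries and selects the Y1 or Y2 filter):

/* Sketch only: the mean/variance computation described in afEstimateVariance(). */
#include <vector>

static double estimateVariance(const std::vector<double> &values)
{
	if (values.empty())
		return 0.0;

	double mean = 0.0;
	for (double v : values)
		mean += v;
	mean /= values.size();

	double var = 0.0;
	for (double v : values)
		var += (v - mean) * (v - mean);

	return var / values.size();
}
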
diff --git a/src/ipa/ipu3/algorithms/af.h b/src/ipa/ipu3/algorithms/af.h
index ccf015f3..68126d46 100644
--- a/src/ipa/ipu3/algorithms/af.h
+++ b/src/ipa/ipu3/algorithms/af.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Red Hat
*
- * af.h - IPU3 Af algorithm
+ * IPU3 Af algorithm
*/
#pragma once
@@ -30,10 +30,14 @@ public:
Af();
~Af() = default;
- void prepare(IPAContext &context, ipu3_uapi_params *params) override;
int configure(IPAContext &context, const IPAConfigInfo &configInfo) override;
- void process(IPAContext &context, IPAFrameContext *frameContext,
- const ipu3_uapi_stats_3a *stats) override;
+ void prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ ipu3_uapi_params *params) override;
+ void process(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const ipu3_uapi_stats_3a *stats,
+ ControlList &metadata) override;
private:
void afCoarseScan(IPAContext &context);
@@ -44,7 +48,7 @@ private:
void afIgnoreFrameReset();
double afEstimateVariance(Span<const y_table_item_t> y_items, bool isY1);
- bool afIsOutOfFocus(IPAContext context);
+ bool afIsOutOfFocus(IPAContext &context);
/* VCM step configuration. It is the current setting of the VCM step. */
uint32_t focus_;
diff --git a/src/ipa/ipu3/algorithms/agc.cpp b/src/ipa/ipu3/algorithms/agc.cpp
index f16be534..0e0114f6 100644
--- a/src/ipa/ipu3/algorithms/agc.cpp
+++ b/src/ipa/ipu3/algorithms/agc.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Ideas On Board
*
- * ipu3_agc.cpp - AGC/AEC mean-based control algorithm
+ * AGC/AEC mean-based control algorithm
*/
#include "agc.h"
@@ -14,6 +14,7 @@
#include <libcamera/base/log.h>
#include <libcamera/base/utils.h>
+#include <libcamera/control_ids.h>
#include <libcamera/ipa/core_ipa_interface.h>
#include "libipa/histogram.h"
@@ -46,9 +47,8 @@ namespace ipa::ipu3::algorithms {
LOG_DEFINE_CATEGORY(IPU3Agc)
-/* Limits for analogue gain values */
+/* Minimum limit for analogue gain value */
static constexpr double kMinAnalogueGain = 1.0;
-static constexpr double kMaxAnalogueGain = 8.0;
/* \todo Honour the FrameDurationLimits control instead of hardcoding a limit */
static constexpr utils::Duration kMaxShutterSpeed = 60ms;
@@ -56,24 +56,32 @@ static constexpr utils::Duration kMaxShutterSpeed = 60ms;
/* Histogram constants */
static constexpr uint32_t knumHistogramBins = 256;
-/* Target value to reach for the top 2% of the histogram */
-static constexpr double kEvGainTarget = 0.5;
-
-/* Number of frames to wait before calculating stats on minimum exposure */
-static constexpr uint32_t kNumStartupFrames = 10;
+Agc::Agc()
+ : minShutterSpeed_(0s), maxShutterSpeed_(0s)
+{
+}
-/*
- * Relative luminance target.
+/**
+ * \brief Initialise the AGC algorithm from tuning files
+ * \param[in] context The shared IPA context
+ * \param[in] tuningData The YamlObject containing Agc tuning data
+ *
+ * This function calls the base class' tuningData parsers to discover which
+ * control values are supported.
*
- * It's a number that's chosen so that, when the camera points at a grey
- * target, the resulting image brightness is considered right.
+ * \return 0 on success or errors from the base class
*/
-static constexpr double kRelativeLuminanceTarget = 0.16;
-
-Agc::Agc()
- : frameCount_(0), minShutterSpeed_(0s),
- maxShutterSpeed_(0s), filteredExposure_(0s)
+int Agc::init(IPAContext &context, const YamlObject &tuningData)
{
+ int ret;
+
+ ret = parseTuningData(tuningData);
+ if (ret)
+ return ret;
+
+ context.ctrlMap.merge(controls());
+
+ return 0;
}
/**
@@ -90,180 +98,66 @@ int Agc::configure(IPAContext &context,
IPAActiveState &activeState = context.activeState;
stride_ = configuration.grid.stride;
+ bdsGrid_ = configuration.grid.bdsGrid;
minShutterSpeed_ = configuration.agc.minShutterSpeed;
maxShutterSpeed_ = std::min(configuration.agc.maxShutterSpeed,
kMaxShutterSpeed);
minAnalogueGain_ = std::max(configuration.agc.minAnalogueGain, kMinAnalogueGain);
- maxAnalogueGain_ = std::min(configuration.agc.maxAnalogueGain, kMaxAnalogueGain);
+ maxAnalogueGain_ = configuration.agc.maxAnalogueGain;
/* Configure the default exposure and gain. */
- activeState.agc.gain = std::max(minAnalogueGain_, kMinAnalogueGain);
+ activeState.agc.gain = minAnalogueGain_;
activeState.agc.exposure = 10ms / configuration.sensor.lineDuration;
- frameCount_ = 0;
+ context.activeState.agc.constraintMode = constraintModes().begin()->first;
+ context.activeState.agc.exposureMode = exposureModeHelpers().begin()->first;
+
+ /* \todo Run this again when FrameDurationLimits is passed in */
+ setLimits(minShutterSpeed_, maxShutterSpeed_, minAnalogueGain_,
+ maxAnalogueGain_);
+ resetFrameCount();
+
return 0;
}
-/**
- * \brief Estimate the mean value of the top 2% of the histogram
- * \param[in] stats The statistics computed by the ImgU
- * \param[in] grid The grid used to store the statistics in the IPU3
- * \return The mean value of the top 2% of the histogram
- */
-double Agc::measureBrightness(const ipu3_uapi_stats_3a *stats,
- const ipu3_uapi_grid_config &grid) const
+Histogram Agc::parseStatistics(const ipu3_uapi_stats_3a *stats,
+ const ipu3_uapi_grid_config &grid)
{
- /* Initialise the histogram array */
uint32_t hist[knumHistogramBins] = { 0 };
+ rgbTriples_.clear();
+
for (unsigned int cellY = 0; cellY < grid.height; cellY++) {
for (unsigned int cellX = 0; cellX < grid.width; cellX++) {
uint32_t cellPosition = cellY * stride_ + cellX;
const ipu3_uapi_awb_set_item *cell =
reinterpret_cast<const ipu3_uapi_awb_set_item *>(
- &stats->awb_raw_buffer.meta_data[cellPosition]
- );
+ &stats->awb_raw_buffer.meta_data[cellPosition]);
+
+ rgbTriples_.push_back({
+ cell->R_avg,
+ (cell->Gr_avg + cell->Gb_avg) / 2,
+ cell->B_avg
+ });
- uint8_t gr = cell->Gr_avg;
- uint8_t gb = cell->Gb_avg;
/*
* Store the average green value to estimate the
* brightness. Even the overexposed pixels are
* taken into account.
*/
- hist[(gr + gb) / 2]++;
+ hist[(cell->Gr_avg + cell->Gb_avg) / 2]++;
}
}
- /* Estimate the quantile mean of the top 2% of the histogram. */
- return Histogram(Span<uint32_t>(hist)).interQuantileMean(0.98, 1.0);
-}
-
-/**
- * \brief Apply a filter on the exposure value to limit the speed of changes
- * \param[in] exposureValue The target exposure from the AGC algorithm
- *
- * The speed of the filter is adaptive, and will produce the target quicker
- * during startup, or when the target exposure is within 20% of the most recent
- * filter output.
- *
- * \return The filtered exposure
- */
-utils::Duration Agc::filterExposure(utils::Duration exposureValue)
-{
- double speed = 0.2;
-
- /* Adapt instantly if we are in startup phase. */
- if (frameCount_ < kNumStartupFrames)
- speed = 1.0;
-
- /*
- * If we are close to the desired result, go faster to avoid making
- * multiple micro-adjustments.
- * \todo Make this customisable?
- */
- if (filteredExposure_ < 1.2 * exposureValue &&
- filteredExposure_ > 0.8 * exposureValue)
- speed = sqrt(speed);
-
- filteredExposure_ = speed * exposureValue +
- filteredExposure_ * (1.0 - speed);
-
- LOG(IPU3Agc, Debug) << "After filtering, exposure " << filteredExposure_;
-
- return filteredExposure_;
-}
-
-/**
- * \brief Estimate the new exposure and gain values
- * \param[inout] frameContext The shared IPA frame Context
- * \param[in] yGain The gain calculated based on the relative luminance target
- * \param[in] iqMeanGain The gain calculated based on the relative luminance target
- */
-void Agc::computeExposure(IPAContext &context, IPAFrameContext *frameContext,
- double yGain, double iqMeanGain)
-{
- const IPASessionConfiguration &configuration = context.configuration;
- /* Get the effective exposure and gain applied on the sensor. */
- uint32_t exposure = frameContext->sensor.exposure;
- double analogueGain = frameContext->sensor.gain;
-
- /* Use the highest of the two gain estimates. */
- double evGain = std::max(yGain, iqMeanGain);
-
- /* Consider within 1% of the target as correctly exposed */
- if (utils::abs_diff(evGain, 1.0) < 0.01)
- LOG(IPU3Agc, Debug) << "We are well exposed (evGain = "
- << evGain << ")";
-
- /* extracted from Rpi::Agc::computeTargetExposure */
-
- /* Calculate the shutter time in seconds */
- utils::Duration currentShutter = exposure * configuration.sensor.lineDuration;
-
- /*
- * Update the exposure value for the next computation using the values
- * of exposure and gain really used by the sensor.
- */
- utils::Duration effectiveExposureValue = currentShutter * analogueGain;
-
- LOG(IPU3Agc, Debug) << "Actual total exposure " << currentShutter * analogueGain
- << " Shutter speed " << currentShutter
- << " Gain " << analogueGain
- << " Needed ev gain " << evGain;
-
- /*
- * Calculate the current exposure value for the scene as the latest
- * exposure value applied multiplied by the new estimated gain.
- */
- utils::Duration exposureValue = effectiveExposureValue * evGain;
-
- /* Clamp the exposure value to the min and max authorized */
- utils::Duration maxTotalExposure = maxShutterSpeed_ * maxAnalogueGain_;
- exposureValue = std::min(exposureValue, maxTotalExposure);
- LOG(IPU3Agc, Debug) << "Target total exposure " << exposureValue
- << ", maximum is " << maxTotalExposure;
-
- /*
- * Filter the exposure.
- * \todo: estimate if we need to desaturate
- */
- exposureValue = filterExposure(exposureValue);
-
- /*
- * Divide the exposure value as new exposure and gain values.
- *
- * Push the shutter time up to the maximum first, and only then
- * increase the gain.
- */
- utils::Duration shutterTime =
- std::clamp<utils::Duration>(exposureValue / minAnalogueGain_,
- minShutterSpeed_, maxShutterSpeed_);
- double stepGain = std::clamp(exposureValue / shutterTime,
- minAnalogueGain_, maxAnalogueGain_);
- LOG(IPU3Agc, Debug) << "Divided up shutter and gain are "
- << shutterTime << " and "
- << stepGain;
-
- IPAActiveState &activeState = context.activeState;
- /* Update the estimated exposure and gain. */
- activeState.agc.exposure = shutterTime / configuration.sensor.lineDuration;
- activeState.agc.gain = stepGain;
+ return Histogram(Span<uint32_t>(hist));
}
/**
* \brief Estimate the relative luminance of the frame with a given gain
- * \param[in] frameContext The shared IPA frame context
- * \param[in] grid The grid used to store the statistics in the IPU3
- * \param[in] stats The IPU3 statistics and ISP results
- * \param[in] gain The gain to apply to the frame
- * \return The relative luminance
- *
- * This function estimates the average relative luminance of the frame that
- * would be output by the sensor if an additional \a gain was applied.
+ * \param[in] gain The gain to apply in estimating luminance
*
* The estimation is based on the AWB statistics for the current frame. Red,
* green and blue averages for all cells are first multiplied by the gain, and
@@ -278,91 +172,86 @@ void Agc::computeExposure(IPAContext &context, IPAFrameContext *frameContext,
*
* More detailed information can be found in:
* https://en.wikipedia.org/wiki/Relative_luminance
+ *
+ * \return The relative luminance of the frame
*/
-double Agc::estimateLuminance(IPAActiveState &activeState,
- const ipu3_uapi_grid_config &grid,
- const ipu3_uapi_stats_3a *stats,
- double gain)
+double Agc::estimateLuminance(double gain) const
{
double redSum = 0, greenSum = 0, blueSum = 0;
- /* Sum the per-channel averages, saturated to 255. */
- for (unsigned int cellY = 0; cellY < grid.height; cellY++) {
- for (unsigned int cellX = 0; cellX < grid.width; cellX++) {
- uint32_t cellPosition = cellY * stride_ + cellX;
-
- const ipu3_uapi_awb_set_item *cell =
- reinterpret_cast<const ipu3_uapi_awb_set_item *>(
- &stats->awb_raw_buffer.meta_data[cellPosition]
- );
- const uint8_t G_avg = (cell->Gr_avg + cell->Gb_avg) / 2;
-
- redSum += std::min(cell->R_avg * gain, 255.0);
- greenSum += std::min(G_avg * gain, 255.0);
- blueSum += std::min(cell->B_avg * gain, 255.0);
- }
+ for (unsigned int i = 0; i < rgbTriples_.size(); i++) {
+ redSum += std::min(std::get<0>(rgbTriples_[i]) * gain, 255.0);
+ greenSum += std::min(std::get<1>(rgbTriples_[i]) * gain, 255.0);
+ blueSum += std::min(std::get<2>(rgbTriples_[i]) * gain, 255.0);
}
- /*
- * Apply the AWB gains to approximate colours correctly, use the Rec.
- * 601 formula to calculate the relative luminance, and normalize it.
- */
- double ySum = redSum * activeState.awb.gains.red * 0.299
- + greenSum * activeState.awb.gains.green * 0.587
- + blueSum * activeState.awb.gains.blue * 0.114;
+ double ySum = redSum * rGain_ * 0.299
+ + greenSum * gGain_ * 0.587
+ + blueSum * bGain_ * 0.114;
- return ySum / (grid.height * grid.width) / 255;
+ return ySum / (bdsGrid_.height * bdsGrid_.width) / 255;
}
/**
* \brief Process IPU3 statistics, and run AGC operations
* \param[in] context The shared IPA context
+ * \param[in] frame The current frame sequence number
* \param[in] frameContext The current frame context
* \param[in] stats The IPU3 statistics and ISP results
+ * \param[out] metadata Metadata for the frame, to be filled by the algorithm
*
* Identify the current image brightness, and use that to estimate the optimal
* new exposure and gain for the scene.
*/
-void Agc::process(IPAContext &context, [[maybe_unused]] IPAFrameContext *frameContext,
- const ipu3_uapi_stats_3a *stats)
+void Agc::process(IPAContext &context, [[maybe_unused]] const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const ipu3_uapi_stats_3a *stats,
+ ControlList &metadata)
{
- /*
- * Estimate the gain needed to have the proportion of pixels in a given
- * desired range. iqMean is the mean value of the top 2% of the
- * cumulative histogram, and we want it to be as close as possible to a
- * configured target.
- */
- double iqMean = measureBrightness(stats, context.configuration.grid.bdsGrid);
- double iqMeanGain = kEvGainTarget * knumHistogramBins / iqMean;
+ Histogram hist = parseStatistics(stats, context.configuration.grid.bdsGrid);
+ rGain_ = context.activeState.awb.gains.red;
+	gGain_ = context.activeState.awb.gains.green;
+	bGain_ = context.activeState.awb.gains.blue;
/*
- * Estimate the gain needed to achieve a relative luminance target. To
- * account for non-linearity caused by saturation, the value needs to be
- * estimated in an iterative process, as multiplying by a gain will not
- * increase the relative luminance by the same factor if some image
- * regions are saturated.
+ * The Agc algorithm needs to know the effective exposure value that was
+ * applied to the sensor when the statistics were collected.
*/
- double yGain = 1.0;
- double yTarget = kRelativeLuminanceTarget;
-
- for (unsigned int i = 0; i < 8; i++) {
- double yValue = estimateLuminance(context.activeState,
- context.configuration.grid.bdsGrid,
- stats, yGain);
- double extraGain = std::min(10.0, yTarget / (yValue + .001));
-
- yGain *= extraGain;
- LOG(IPU3Agc, Debug) << "Y value: " << yValue
- << ", Y target: " << yTarget
- << ", gives gain " << yGain;
- if (extraGain < 1.01)
- break;
- }
+ utils::Duration exposureTime = context.configuration.sensor.lineDuration
+ * frameContext.sensor.exposure;
+ double analogueGain = frameContext.sensor.gain;
+ utils::Duration effectiveExposureValue = exposureTime * analogueGain;
+
+ utils::Duration shutterTime;
+ double aGain, dGain;
+ std::tie(shutterTime, aGain, dGain) =
+ calculateNewEv(context.activeState.agc.constraintMode,
+ context.activeState.agc.exposureMode, hist,
+ effectiveExposureValue);
+
+ LOG(IPU3Agc, Debug)
+ << "Divided up shutter, analogue gain and digital gain are "
+ << shutterTime << ", " << aGain << " and " << dGain;
+
+ IPAActiveState &activeState = context.activeState;
+ /* Update the estimated exposure and gain. */
+ activeState.agc.exposure = shutterTime / context.configuration.sensor.lineDuration;
+ activeState.agc.gain = aGain;
+
+ metadata.set(controls::AnalogueGain, frameContext.sensor.gain);
+ metadata.set(controls::ExposureTime, exposureTime.get<std::micro>());
+
+ /* \todo Use VBlank value calculated from each frame exposure. */
+ uint32_t vTotal = context.configuration.sensor.size.height
+ + context.configuration.sensor.defVBlank;
+ utils::Duration frameDuration = context.configuration.sensor.lineDuration
+ * vTotal;
+	 * The request was successfully queued and there may be enough
+	 * buffers to create a new one. Don't pause the task, so it gets
+	 * another try.
}
+REGISTER_IPA_ALGORITHM(Agc, "Agc")
+
} /* namespace ipa::ipu3::algorithms */
} /* namespace libcamera */
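
The luminance estimate that feeds the AGC is a gain-saturated Rec. 601 weighted sum of the per-cell channel averages, scaled by the AWB gains and normalised to [0, 1]. A condensed sketch of that calculation over the cached RGB triples (signature and names simplified, not the class method itself):

/* Sketch only: relative luminance for a candidate gain, Rec. 601 weighting. */
#include <algorithm>
#include <cstdint>
#include <tuple>
#include <vector>

static double relativeLuminance(const std::vector<std::tuple<uint8_t, uint8_t, uint8_t>> &cells,
				double gain, double rGain, double gGain, double bGain)
{
	double r = 0.0, g = 0.0, b = 0.0;

	/* Saturate each channel at 255 to model sensor clipping. */
	for (const auto &[cr, cg, cb] : cells) {
		r += std::min(cr * gain, 255.0);
		g += std::min(cg * gain, 255.0);
		b += std::min(cb * gain, 255.0);
	}

	double y = r * rGain * 0.299 + g * gGain * 0.587 + b * bGain * 0.114;

	return y / cells.size() / 255.0;
}
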
diff --git a/src/ipa/ipu3/algorithms/agc.h b/src/ipa/ipu3/algorithms/agc.h
index 105ae0f2..411f4da0 100644
--- a/src/ipa/ipu3/algorithms/agc.h
+++ b/src/ipa/ipu3/algorithms/agc.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Ideas On Board
*
- * agc.h - IPU3 AGC/AEC mean-based control algorithm
+ * IPU3 AGC/AEC mean-based control algorithm
*/
#pragma once
@@ -13,6 +13,9 @@
#include <libcamera/geometry.h>
+#include "libipa/agc_mean_luminance.h"
+#include "libipa/histogram.h"
+
#include "algorithm.h"
namespace libcamera {
@@ -21,28 +24,23 @@ struct IPACameraSensorInfo;
namespace ipa::ipu3::algorithms {
-class Agc : public Algorithm
+class Agc : public Algorithm, public AgcMeanLuminance
{
public:
Agc();
~Agc() = default;
+ int init(IPAContext &context, const YamlObject &tuningData) override;
int configure(IPAContext &context, const IPAConfigInfo &configInfo) override;
- void process(IPAContext &context, IPAFrameContext *frameContext,
- const ipu3_uapi_stats_3a *stats) override;
+ void process(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const ipu3_uapi_stats_3a *stats,
+ ControlList &metadata) override;
private:
- double measureBrightness(const ipu3_uapi_stats_3a *stats,
- const ipu3_uapi_grid_config &grid) const;
- utils::Duration filterExposure(utils::Duration currentExposure);
- void computeExposure(IPAContext &context, IPAFrameContext *frameContext,
- double yGain, double iqMeanGain);
- double estimateLuminance(IPAActiveState &activeState,
- const ipu3_uapi_grid_config &grid,
- const ipu3_uapi_stats_3a *stats,
- double gain);
-
- uint64_t frameCount_;
+ double estimateLuminance(double gain) const override;
+ Histogram parseStatistics(const ipu3_uapi_stats_3a *stats,
+ const ipu3_uapi_grid_config &grid);
utils::Duration minShutterSpeed_;
utils::Duration maxShutterSpeed_;
@@ -50,9 +48,12 @@ private:
double minAnalogueGain_;
double maxAnalogueGain_;
- utils::Duration filteredExposure_;
-
uint32_t stride_;
+ double rGain_;
+ double gGain_;
+ double bGain_;
+ ipu3_uapi_grid_config bdsGrid_;
+ std::vector<std::tuple<uint8_t, uint8_t, uint8_t>> rgbTriples_;
};
} /* namespace ipa::ipu3::algorithms */
diff --git a/src/ipa/ipu3/algorithms/algorithm.h b/src/ipa/ipu3/algorithms/algorithm.h
index ae134a94..c7801f93 100644
--- a/src/ipa/ipu3/algorithms/algorithm.h
+++ b/src/ipa/ipu3/algorithms/algorithm.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Ideas On Board
*
- * algorithm.h - IPU3 control algorithm interface
+ * IPU3 control algorithm interface
*/
#pragma once
diff --git a/src/ipa/ipu3/algorithms/awb.cpp b/src/ipa/ipu3/algorithms/awb.cpp
index 70426722..4d6e3994 100644
--- a/src/ipa/ipu3/algorithms/awb.cpp
+++ b/src/ipa/ipu3/algorithms/awb.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Ideas On Board
*
- * awb.cpp - AWB control algorithm
+ * AWB control algorithm
*/
#include "awb.h"
@@ -11,6 +11,8 @@
#include <libcamera/base/log.h>
+#include <libcamera/control_ids.h>
+
/**
* \file awb.h
*/
@@ -216,6 +218,89 @@ int Awb::configure(IPAContext &context,
return 0;
}
+constexpr uint16_t Awb::threshold(float value)
+{
+ /* AWB thresholds are in the range [0, 8191] */
+ return value * 8191;
+}
+
+constexpr uint16_t Awb::gainValue(double gain)
+{
+ /*
+ * The colour gains applied by the BNR for the four channels (Gr, R, B
+ * and Gb) are expressed in the parameters structure as 16-bit integers
+ * that store a fixed-point U3.13 value in the range [0, 8[.
+ *
+ * The real gain value is equal to the gain parameter plus one, i.e.
+ *
+ * Pout = Pin * (1 + gain / 8192)
+ *
+ * where 'Pin' is the input pixel value, 'Pout' the output pixel value,
+ * and 'gain' the gain in the parameters structure as a 16-bit integer.
+ */
+ return std::clamp((gain - 1.0) * 8192, 0.0, 65535.0);
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::prepare
+ */
+void Awb::prepare(IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ [[maybe_unused]] IPAFrameContext &frameContext,
+ ipu3_uapi_params *params)
+{
+ /*
+ * Green saturation thresholds are reduced because we are using the
+ * green channel only in the exposure computation.
+ */
+ params->acc_param.awb.config.rgbs_thr_r = threshold(1.0);
+ params->acc_param.awb.config.rgbs_thr_gr = threshold(0.9);
+ params->acc_param.awb.config.rgbs_thr_gb = threshold(0.9);
+ params->acc_param.awb.config.rgbs_thr_b = threshold(1.0);
+
+ /*
+ * Enable saturation inclusion on thr_b for ImgU to update the
+ * ipu3_uapi_awb_set_item->sat_ratio field.
+ */
+ params->acc_param.awb.config.rgbs_thr_b |= IPU3_UAPI_AWB_RGBS_THR_B_INCL_SAT |
+ IPU3_UAPI_AWB_RGBS_THR_B_EN;
+
+ const ipu3_uapi_grid_config &grid = context.configuration.grid.bdsGrid;
+
+ params->acc_param.awb.config.grid = context.configuration.grid.bdsGrid;
+
+ /*
+ * Optical center is column start (respectively row start) of the
+ * cell of interest minus its X center (respectively Y center).
+ *
+ * For the moment use BDS as a first approximation, but it should
+ * be calculated based on Shading (SHD) parameters.
+ */
+ params->acc_param.bnr = imguCssBnrDefaults;
+ Size &bdsOutputSize = context.configuration.grid.bdsOutputSize;
+ params->acc_param.bnr.column_size = bdsOutputSize.width;
+ params->acc_param.bnr.opt_center.x_reset = grid.x_start - (bdsOutputSize.width / 2);
+ params->acc_param.bnr.opt_center.y_reset = grid.y_start - (bdsOutputSize.height / 2);
+ params->acc_param.bnr.opt_center_sqr.x_sqr_reset = params->acc_param.bnr.opt_center.x_reset
+ * params->acc_param.bnr.opt_center.x_reset;
+ params->acc_param.bnr.opt_center_sqr.y_sqr_reset = params->acc_param.bnr.opt_center.y_reset
+ * params->acc_param.bnr.opt_center.y_reset;
+
+ params->acc_param.bnr.wb_gains.gr = gainValue(context.activeState.awb.gains.green);
+ params->acc_param.bnr.wb_gains.r = gainValue(context.activeState.awb.gains.red);
+ params->acc_param.bnr.wb_gains.b = gainValue(context.activeState.awb.gains.blue);
+ params->acc_param.bnr.wb_gains.gb = gainValue(context.activeState.awb.gains.green);
+
+ LOG(IPU3Awb, Debug) << "Color temperature estimated: " << asyncResults_.temperatureK;
+
+	/* The CCM matrix may change when color temperature is taken into account */
+ params->acc_param.ccm = imguCssCcmDefault;
+
+ params->use.acc_awb = 1;
+ params->use.acc_bnr = 1;
+ params->use.acc_ccm = 1;
+}
+
/**
 * The function estimates the correlated color temperature from the RGB
 * color space input.
@@ -387,8 +472,10 @@ void Awb::calculateWBGains(const ipu3_uapi_stats_3a *stats)
/**
* \copydoc libcamera::ipa::Algorithm::process
*/
-void Awb::process(IPAContext &context, [[maybe_unused]] IPAFrameContext *frameContext,
- const ipu3_uapi_stats_3a *stats)
+void Awb::process(IPAContext &context, [[maybe_unused]] const uint32_t frame,
+ [[maybe_unused]] IPAFrameContext &frameContext,
+ const ipu3_uapi_stats_3a *stats,
+ [[maybe_unused]] ControlList &metadata)
{
calculateWBGains(stats);
@@ -401,87 +488,17 @@ void Awb::process(IPAContext &context, [[maybe_unused]] IPAFrameContext *frameCo
context.activeState.awb.gains.green = asyncResults_.greenGain;
context.activeState.awb.gains.red = asyncResults_.redGain;
context.activeState.awb.temperatureK = asyncResults_.temperatureK;
-}
-constexpr uint16_t Awb::threshold(float value)
-{
- /* AWB thresholds are in the range [0, 8191] */
- return value * 8191;
+ metadata.set(controls::AwbEnable, true);
+ metadata.set(controls::ColourGains, {
+ static_cast<float>(context.activeState.awb.gains.red),
+ static_cast<float>(context.activeState.awb.gains.blue)
+ });
+ metadata.set(controls::ColourTemperature,
+ context.activeState.awb.temperatureK);
}
-constexpr uint16_t Awb::gainValue(double gain)
-{
- /*
- * The colour gains applied by the BNR for the four channels (Gr, R, B
- * and Gb) are expressed in the parameters structure as 16-bit integers
- * that store a fixed-point U3.13 value in the range [0, 8[.
- *
- * The real gain value is equal to the gain parameter plus one, i.e.
- *
- * Pout = Pin * (1 + gain / 8192)
- *
- * where 'Pin' is the input pixel value, 'Pout' the output pixel value,
- * and 'gain' the gain in the parameters structure as a 16-bit integer.
- */
- return std::clamp((gain - 1.0) * 8192, 0.0, 65535.0);
-}
-
-/**
- * \copydoc libcamera::ipa::Algorithm::prepare
- */
-void Awb::prepare(IPAContext &context, ipu3_uapi_params *params)
-{
- /*
- * Green saturation thresholds are reduced because we are using the
- * green channel only in the exposure computation.
- */
- params->acc_param.awb.config.rgbs_thr_r = threshold(1.0);
- params->acc_param.awb.config.rgbs_thr_gr = threshold(0.9);
- params->acc_param.awb.config.rgbs_thr_gb = threshold(0.9);
- params->acc_param.awb.config.rgbs_thr_b = threshold(1.0);
-
- /*
- * Enable saturation inclusion on thr_b for ImgU to update the
- * ipu3_uapi_awb_set_item->sat_ratio field.
- */
- params->acc_param.awb.config.rgbs_thr_b |= IPU3_UAPI_AWB_RGBS_THR_B_INCL_SAT |
- IPU3_UAPI_AWB_RGBS_THR_B_EN;
-
- const ipu3_uapi_grid_config &grid = context.configuration.grid.bdsGrid;
-
- params->acc_param.awb.config.grid = context.configuration.grid.bdsGrid;
-
- /*
- * Optical center is column start (respectively row start) of the
- * cell of interest minus its X center (respectively Y center).
- *
- * For the moment use BDS as a first approximation, but it should
- * be calculated based on Shading (SHD) parameters.
- */
- params->acc_param.bnr = imguCssBnrDefaults;
- Size &bdsOutputSize = context.configuration.grid.bdsOutputSize;
- params->acc_param.bnr.column_size = bdsOutputSize.width;
- params->acc_param.bnr.opt_center.x_reset = grid.x_start - (bdsOutputSize.width / 2);
- params->acc_param.bnr.opt_center.y_reset = grid.y_start - (bdsOutputSize.height / 2);
- params->acc_param.bnr.opt_center_sqr.x_sqr_reset = params->acc_param.bnr.opt_center.x_reset
- * params->acc_param.bnr.opt_center.x_reset;
- params->acc_param.bnr.opt_center_sqr.y_sqr_reset = params->acc_param.bnr.opt_center.y_reset
- * params->acc_param.bnr.opt_center.y_reset;
-
- params->acc_param.bnr.wb_gains.gr = gainValue(context.activeState.awb.gains.green);
- params->acc_param.bnr.wb_gains.r = gainValue(context.activeState.awb.gains.red);
- params->acc_param.bnr.wb_gains.b = gainValue(context.activeState.awb.gains.blue);
- params->acc_param.bnr.wb_gains.gb = gainValue(context.activeState.awb.gains.green);
-
- LOG(IPU3Awb, Debug) << "Color temperature estimated: " << asyncResults_.temperatureK;
-
- /* The CCM matrix may change when color temperature will be used */
- params->acc_param.ccm = imguCssCcmDefault;
-
- params->use.acc_awb = 1;
- params->use.acc_bnr = 1;
- params->use.acc_ccm = 1;
-}
+REGISTER_IPA_ALGORITHM(Awb, "Awb")
} /* namespace ipa::ipu3::algorithms */
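
The BNR gain encoding used in prepare() stores the gain as a U3.13 fixed-point offset from unity, so a gain of 1.0 encodes to 0 and a gain of 2.0 to 8192. A small compile-time check of that conversion, mirroring the formula quoted in the Awb::gainValue() comment (helper name is illustrative):

/* Sketch only: U3.13 encoding of BNR white balance gains. */
#include <algorithm>
#include <cstdint>

static constexpr uint16_t gainParam(double gain)
{
	/* Pout = Pin * (1 + param / 8192), so param = (gain - 1) * 8192. */
	return std::clamp((gain - 1.0) * 8192, 0.0, 65535.0);
}

static_assert(gainParam(1.0) == 0);	/* Unity gain maps to 0. */
static_assert(gainParam(2.0) == 8192);	/* A gain of 2.0 maps to 8192. */
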
diff --git a/src/ipa/ipu3/algorithms/awb.h b/src/ipa/ipu3/algorithms/awb.h
index 0acd2148..c0202823 100644
--- a/src/ipa/ipu3/algorithms/awb.h
+++ b/src/ipa/ipu3/algorithms/awb.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Ideas On Board
*
- * awb.h - IPU3 AWB control algorithm
+ * IPU3 AWB control algorithm
*/
#pragma once
@@ -39,9 +39,13 @@ public:
~Awb();
int configure(IPAContext &context, const IPAConfigInfo &configInfo) override;
- void prepare(IPAContext &context, ipu3_uapi_params *params) override;
- void process(IPAContext &context, IPAFrameContext *frameContext,
- const ipu3_uapi_stats_3a *stats) override;
+ void prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ ipu3_uapi_params *params) override;
+ void process(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const ipu3_uapi_stats_3a *stats,
+ ControlList &metadata) override;
private:
/* \todo Make these structs available to all the ISPs ? */
diff --git a/src/ipa/ipu3/algorithms/blc.cpp b/src/ipa/ipu3/algorithms/blc.cpp
index 78ab7bff..257f40e2 100644
--- a/src/ipa/ipu3/algorithms/blc.cpp
+++ b/src/ipa/ipu3/algorithms/blc.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google inc.
*
- * blc.cpp - IPU3 Black Level Correction control
+ * IPU3 Black Level Correction control
*/
#include "blc.h"
@@ -38,14 +38,18 @@ BlackLevelCorrection::BlackLevelCorrection()
/**
* \brief Fill in the parameter structure, and enable black level correction
- * \param context The shared IPA context
- * \param params The IPU3 parameters
+ * \param[in] context The shared IPA context
+ * \param[in] frame The frame context sequence number
+ * \param[in] frameContext The FrameContext for this frame
+ * \param[out] params The IPU3 parameters
*
* Populate the IPU3 parameter structure with the correction values for each
* channel and enable the corresponding ImgU block processing.
*/
void BlackLevelCorrection::prepare([[maybe_unused]] IPAContext &context,
- ipu3_uapi_params *params)
+ [[maybe_unused]] const uint32_t frame,
+ [[maybe_unused]] IPAFrameContext &frameContext,
+ ipu3_uapi_params *params)
{
/*
* The Optical Black Level correction values
@@ -62,6 +66,8 @@ void BlackLevelCorrection::prepare([[maybe_unused]] IPAContext &context,
params->use.obgrid_param = 1;
}
+REGISTER_IPA_ALGORITHM(BlackLevelCorrection, "BlackLevelCorrection")
+
} /* namespace ipa::ipu3::algorithms */
} /* namespace libcamera */
diff --git a/src/ipa/ipu3/algorithms/blc.h b/src/ipa/ipu3/algorithms/blc.h
index d8da1748..62748045 100644
--- a/src/ipa/ipu3/algorithms/blc.h
+++ b/src/ipa/ipu3/algorithms/blc.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google inc.
*
- * black_correction.h - IPU3 Black Level Correction control
+ * IPU3 Black Level Correction control
*/
#pragma once
@@ -18,7 +18,9 @@ class BlackLevelCorrection : public Algorithm
public:
BlackLevelCorrection();
- void prepare(IPAContext &context, ipu3_uapi_params *params) override;
+ void prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ ipu3_uapi_params *params) override;
};
} /* namespace ipa::ipu3::algorithms */
diff --git a/src/ipa/ipu3/algorithms/tone_mapping.cpp b/src/ipa/ipu3/algorithms/tone_mapping.cpp
index f86e79b2..160338c1 100644
--- a/src/ipa/ipu3/algorithms/tone_mapping.cpp
+++ b/src/ipa/ipu3/algorithms/tone_mapping.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google inc.
*
- * tone_mapping.cpp - IPU3 ToneMapping and Gamma control
+ * IPU3 ToneMapping and Gamma control
*/
#include "tone_mapping.h"
@@ -49,13 +49,17 @@ int ToneMapping::configure(IPAContext &context,
/**
* \brief Fill in the parameter structure, and enable gamma control
- * \param context The shared IPA context
- * \param params The IPU3 parameters
+ * \param[in] context The shared IPA context
+ * \param[in] frame The frame context sequence number
+ * \param[in] frameContext The FrameContext for this frame
+ * \param[out] params The IPU3 parameters
*
* Populate the IPU3 parameter structure with our tone mapping look up table and
* enable the gamma control module in the processing blocks.
*/
void ToneMapping::prepare([[maybe_unused]] IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ [[maybe_unused]] IPAFrameContext &frameContext,
ipu3_uapi_params *params)
{
/* Copy the calculated LUT into the parameters buffer. */
@@ -71,15 +75,19 @@ void ToneMapping::prepare([[maybe_unused]] IPAContext &context,
/**
* \brief Calculate the tone mapping look up table
- * \param context The shared IPA context
- * \param frameContext The current frame context
- * \param stats The IPU3 statistics and ISP results
+ * \param[in] context The shared IPA context
+ * \param[in] frame The current frame sequence number
+ * \param[in] frameContext The current frame context
+ * \param[in] stats The IPU3 statistics and ISP results
+ * \param[out] metadata Metadata for the frame, to be filled by the algorithm
*
* The tone mapping look up table is generated as an inverse power curve from
* our gamma setting.
*/
-void ToneMapping::process(IPAContext &context, [[maybe_unused]] IPAFrameContext *frameContext,
- [[maybe_unused]] const ipu3_uapi_stats_3a *stats)
+void ToneMapping::process(IPAContext &context, [[maybe_unused]] const uint32_t frame,
+ [[maybe_unused]] IPAFrameContext &frameContext,
+ [[maybe_unused]] const ipu3_uapi_stats_3a *stats,
+ [[maybe_unused]] ControlList &metadata)
{
/*
* Hardcode gamma to 1.1 as a default for now.
@@ -105,6 +113,8 @@ void ToneMapping::process(IPAContext &context, [[maybe_unused]] IPAFrameContext
context.activeState.toneMapping.gamma = gamma_;
}
+REGISTER_IPA_ALGORITHM(ToneMapping, "ToneMapping")
+
} /* namespace ipa::ipu3::algorithms */
} /* namespace libcamera */
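
As described above, the gamma LUT is an inverse power curve: each normalised input is raised to 1/gamma and scaled back to the output range. A generic sketch of building such a table (size and output range are placeholders, not the ImgU's actual layout):

/* Sketch only: an inverse power-curve look up table for a given gamma. */
#include <cmath>
#include <cstdint>
#include <vector>

static std::vector<uint16_t> gammaLut(double gamma, unsigned int size, uint16_t maxValue)
{
	std::vector<uint16_t> lut(size);

	for (unsigned int i = 0; i < size; i++) {
		double in = static_cast<double>(i) / (size - 1);
		lut[i] = static_cast<uint16_t>(std::pow(in, 1.0 / gamma) * maxValue);
	}

	return lut;
}
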
diff --git a/src/ipa/ipu3/algorithms/tone_mapping.h b/src/ipa/ipu3/algorithms/tone_mapping.h
index d7d48006..b2b38010 100644
--- a/src/ipa/ipu3/algorithms/tone_mapping.h
+++ b/src/ipa/ipu3/algorithms/tone_mapping.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google inc.
*
- * tone_mapping.h - IPU3 ToneMapping and Gamma control
+ * IPU3 ToneMapping and Gamma control
*/
#pragma once
@@ -19,9 +19,12 @@ public:
ToneMapping();
int configure(IPAContext &context, const IPAConfigInfo &configInfo) override;
- void prepare(IPAContext &context, ipu3_uapi_params *params) override;
- void process(IPAContext &context, IPAFrameContext *frameContext,
- const ipu3_uapi_stats_3a *stats) override;
+ void prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext, ipu3_uapi_params *params) override;
+ void process(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const ipu3_uapi_stats_3a *stats,
+ ControlList &metadata) override;
private:
double gamma_;
diff --git a/src/ipa/ipu3/data/meson.build b/src/ipa/ipu3/data/meson.build
new file mode 100644
index 00000000..0f7cd5c6
--- /dev/null
+++ b/src/ipa/ipu3/data/meson.build
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: CC0-1.0
+
+conf_files = files([
+ 'uncalibrated.yaml',
+])
+
+install_data(conf_files,
+ install_dir : ipa_data_dir / 'ipu3',
+ install_tag : 'runtime')
diff --git a/src/ipa/ipu3/data/uncalibrated.yaml b/src/ipa/ipu3/data/uncalibrated.yaml
new file mode 100644
index 00000000..794ab3ed
--- /dev/null
+++ b/src/ipa/ipu3/data/uncalibrated.yaml
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: CC0-1.0
+%YAML 1.1
+---
+version: 1
+algorithms:
+ - Af:
+ - Agc:
+ - Awb:
+ - BlackLevelCorrection:
+ - ToneMapping:
+...
diff --git a/src/ipa/ipu3/ipa_context.cpp b/src/ipa/ipu3/ipa_context.cpp
index 13cdb835..917d0654 100644
--- a/src/ipa/ipu3/ipa_context.cpp
+++ b/src/ipa/ipu3/ipa_context.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * ipa_context.cpp - IPU3 IPA Context
+ * IPU3 IPA Context
*/
#include "ipa_context.h"
@@ -36,22 +36,6 @@ namespace libcamera::ipa::ipu3 {
*/
/**
- * \struct IPAFrameContext
- * \brief Context for a frame
- *
- * The frame context stores data specific to a single frame processed by the
- * IPA. Each frame processed by the IPA has a context associated with it,
- * accessible through the IPAContext structure.
- *
- * Fields in the frame context should reflect values and controls
- * associated with the specific frame as requested by the application, and
- * as configured by the hardware. Fields can be read by algorithms to
- * determine if they should update any specific action for this frame, and
- * finally to update the metadata control lists when the frame is fully
- * completed.
- */
-
-/**
* \struct IPAContext
* \brief Global IPA context data shared between all algorithms
*
@@ -63,6 +47,9 @@ namespace libcamera::ipa::ipu3 {
*
* \var IPAContext::activeState
* \brief The current state of IPA algorithms
+ *
+ * \var IPAContext::ctrlMap
+ * \brief A ControlInfoMap::Map of controls populated by the algorithms
*/
/**
@@ -84,22 +71,21 @@ namespace libcamera::ipa::ipu3 {
* \brief AF grid configuration of the IPA
*
* \var IPASessionConfiguration::af.afGrid
- * \brief AF scene grid configuration.
+ * \brief AF scene grid configuration
*/
/**
* \var IPAActiveState::af
* \brief Context for the Automatic Focus algorithm
*
- * \struct IPAActiveState::af
* \var IPAActiveState::af.focus
* \brief Current position of the lens
*
* \var IPAActiveState::af.maxVariance
- * \brief The maximum variance of the current image.
+ * \brief The maximum variance of the current image
*
* \var IPAActiveState::af.stable
- * \brief It is set to true, if the best focus is found.
+ * \brief Set to true if the best focus is found
*/
/**
@@ -128,6 +114,9 @@ namespace libcamera::ipa::ipu3 {
*
* \var IPASessionConfiguration::sensor.defVBlank
* \brief The default vblank value of the sensor
+ *
+ * \var IPASessionConfiguration::sensor.size
+ * \brief Sensor output resolution
*/
/**
@@ -150,7 +139,7 @@ namespace libcamera::ipa::ipu3 {
* \var IPAActiveState::awb
* \brief Context for the Automatic White Balance algorithm
*
- * \struct IPAActiveState::awb.gains
+ * \var IPAActiveState::awb.gains
* \brief White balance gains
*
* \var IPAActiveState::awb.gains.red
@@ -181,25 +170,8 @@ namespace libcamera::ipa::ipu3 {
*/
/**
- * \brief Default constructor for IPAFrameContext
- */
-IPAFrameContext::IPAFrameContext() = default;
-
-/**
- * \brief Construct a IPAFrameContext instance
- */
-IPAFrameContext::IPAFrameContext(uint32_t id, const ControlList &reqControls)
- : frame(id), frameControls(reqControls)
-{
- sensor = {};
-}
-
-/**
- * \var IPAFrameContext::frame
- * \brief The frame number
- *
- * \var IPAFrameContext::frameControls
- * \brief Controls sent in by the application while queuing the request
+ * \struct IPAFrameContext
+ * \brief IPU3-specific FrameContext
*
* \var IPAFrameContext::sensor
* \brief Effective sensor values that were applied for the frame
diff --git a/src/ipa/ipu3/ipa_context.h b/src/ipa/ipu3/ipa_context.h
index 42e11141..c85d1e34 100644
--- a/src/ipa/ipu3/ipa_context.h
+++ b/src/ipa/ipu3/ipa_context.h
@@ -2,14 +2,12 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * ipa_context.h - IPU3 IPA Context
+ * IPU3 IPA Context
*
*/
#pragma once
-#include <array>
-
#include <linux/intel-ipu3.h>
#include <libcamera/base/utils.h>
@@ -17,13 +15,12 @@
#include <libcamera/controls.h>
#include <libcamera/geometry.h>
+#include <libipa/fc_queue.h>
+
namespace libcamera {
namespace ipa::ipu3 {
-/* Maximum number of frame contexts to be held */
-static constexpr uint32_t kMaxFrameContexts = 16;
-
struct IPASessionConfiguration {
struct {
ipu3_uapi_grid_config bdsGrid;
@@ -45,6 +42,7 @@ struct IPASessionConfiguration {
struct {
int32_t defVBlank;
utils::Duration lineDuration;
+ Size size;
} sensor;
};
@@ -58,6 +56,8 @@ struct IPAActiveState {
struct {
uint32_t exposure;
double gain;
+ uint32_t constraintMode;
+ uint32_t exposureMode;
} agc;
struct {
@@ -76,24 +76,20 @@ struct IPAActiveState {
} toneMapping;
};
-struct IPAFrameContext {
- IPAFrameContext();
- IPAFrameContext(uint32_t id, const ControlList &reqControls);
-
+struct IPAFrameContext : public FrameContext {
struct {
uint32_t exposure;
double gain;
} sensor;
-
- uint32_t frame;
- ControlList frameControls;
};
struct IPAContext {
IPASessionConfiguration configuration;
IPAActiveState activeState;
- std::array<IPAFrameContext, kMaxFrameContexts> frameContexts;
+ FCQueue<IPAFrameContext> frameContexts;
+
+ ControlInfoMap::Map ctrlMap;
};
} /* namespace ipa::ipu3 */
diff --git a/src/ipa/ipu3/ipu3-ipa-design-guide.rst b/src/ipa/ipu3/ipu3-ipa-design-guide.rst
index e724fdda..72506397 100644
--- a/src/ipa/ipu3/ipu3-ipa-design-guide.rst
+++ b/src/ipa/ipu3/ipu3-ipa-design-guide.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: CC-BY-SA-4.0
+
IPU3 IPA Architecture Design and Overview
=========================================
diff --git a/src/ipa/ipu3/ipu3.cpp b/src/ipa/ipu3/ipu3.cpp
index 2f6bb672..cdcdf1fb 100644
--- a/src/ipa/ipu3/ipu3.cpp
+++ b/src/ipa/ipu3/ipu3.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * ipu3.cpp - IPU3 Image Processing Algorithms
+ * IPU3 Image Processing Algorithms
*/
#include <algorithm>
@@ -18,6 +18,7 @@
#include <linux/intel-ipu3.h>
#include <linux/v4l2-controls.h>
+#include <libcamera/base/file.h>
#include <libcamera/base/log.h>
#include <libcamera/base/utils.h>
@@ -29,6 +30,7 @@
#include <libcamera/request.h>
#include "libcamera/internal/mapped_framebuffer.h"
+#include "libcamera/internal/yaml_parser.h"
#include "algorithms/af.h"
#include "algorithms/agc.h"
@@ -38,6 +40,8 @@
#include "algorithms/tone_mapping.h"
#include "libipa/camera_sensor_helper.h"
+#include "ipa_context.h"
+
/* Minimum grid width, expressed as a number of cells */
static constexpr uint32_t kMinGridWidth = 16;
/* Maximum grid width, expressed as a number of cells */
@@ -51,6 +55,9 @@ static constexpr uint32_t kMinCellSizeLog2 = 3;
/* log2 of the maximum grid cell width and height, in pixels */
static constexpr uint32_t kMaxCellSizeLog2 = 6;
+/* Maximum number of frame contexts to be held */
+static constexpr uint32_t kMaxFrameContexts = 16;
+
namespace libcamera {
LOG_DEFINE_CATEGORY(IPAIPU3)
@@ -71,7 +78,7 @@ namespace ipa::ipu3 {
*
* At initialisation time, a CameraSensorHelper is instantiated to support
* camera-specific calculations, while the default controls are computed, and
- * the algorithms are constructed and placed in an ordered list.
+ * the algorithms are instantiated from the tuning data file.
*
* The IPU3 ImgU operates with a grid layout to divide the overall frame into
* rectangular cells of pixels. When the IPA is configured, we determine the
@@ -92,12 +99,14 @@ namespace ipa::ipu3 {
* fillParamsBuffer() call.
*
* The individual algorithms are split into modular components that are called
- * iteratively to allow them to process statistics from the ImgU in a defined
- * order.
+ * iteratively to allow them to process statistics from the ImgU in the order
+ * defined in the tuning data file.
*
- * The current implementation supports three core algorithms:
- * - Automatic white balance (AWB)
+ * The current implementation supports five core algorithms:
+ *
+ * - Auto focus (AF)
* - Automatic gain and exposure control (AGC)
+ * - Automatic white balance (AWB)
* - Black level correction (BLC)
* - Tone mapping (Gamma)
*
@@ -128,9 +137,11 @@ namespace ipa::ipu3 {
* sensor-specific tuning to adapt for Black Level compensation (BLC), Lens
* shading correction (SHD) and Color correction (CCM).
*/
-class IPAIPU3 : public IPAIPU3Interface
+class IPAIPU3 : public IPAIPU3Interface, public Module
{
public:
+ IPAIPU3();
+
int init(const IPASettings &settings,
const IPACameraSensorInfo &sensorInfo,
const ControlInfoMap &sensorControls,
@@ -150,14 +161,16 @@ public:
void processStatsBuffer(const uint32_t frame, const int64_t frameTimestamp,
const uint32_t bufferId,
const ControlList &sensorControls) override;
+
+protected:
+ std::string logPrefix() const override;
+
private:
void updateControls(const IPACameraSensorInfo &sensorInfo,
const ControlInfoMap &sensorControls,
ControlInfoMap *ipaControls);
void updateSessionConfiguration(const ControlInfoMap &sensorControls);
- bool validateSensorControls();
-
void setControls(unsigned int frame);
void calculateBdsGrid(const Size &bdsOutputSize);
@@ -171,13 +184,20 @@ private:
/* Interface to the Camera Helper */
std::unique_ptr<CameraSensorHelper> camHelper_;
- /* Maintain the algorithms used by the IPA */
- std::list<std::unique_ptr<ipa::ipu3::Algorithm>> algorithms_;
-
/* Local parameter storage */
struct IPAContext context_;
};
+IPAIPU3::IPAIPU3()
+ : context_({ {}, {}, { kMaxFrameContexts }, {} })
+{
+}
+
+std::string IPAIPU3::logPrefix() const
+{
+ return "ipu3";
+}
+
/**
* \brief Compute IPASessionConfiguration using the sensor information and the
* sensor V4L2 controls
@@ -267,32 +287,11 @@ void IPAIPU3::updateControls(const IPACameraSensorInfo &sensorInfo,
frameDurations[1],
frameDurations[2]);
+ controls.merge(context_.ctrlMap);
*ipaControls = ControlInfoMap(std::move(controls), controls::controls);
}
/**
- * \brief Validate that the sensor controls mandatory for the IPA exists
- */
-bool IPAIPU3::validateSensorControls()
-{
- static const uint32_t ctrls[] = {
- V4L2_CID_ANALOGUE_GAIN,
- V4L2_CID_EXPOSURE,
- V4L2_CID_VBLANK,
- };
-
- for (auto c : ctrls) {
- if (sensorCtrls_.find(c) == sensorCtrls_.end()) {
- LOG(IPAIPU3, Error) << "Unable to find sensor control "
- << utils::hex(c);
- return false;
- }
- }
-
- return true;
-}
-
-/**
* \brief Initialize the IPA module and its controls
*
* This function receives the camera sensor information from the pipeline
@@ -304,7 +303,7 @@ int IPAIPU3::init(const IPASettings &settings,
const ControlInfoMap &sensorControls,
ControlInfoMap *ipaControls)
{
- camHelper_ = CameraSensorHelperFactory::create(settings.sensorModel);
+ camHelper_ = CameraSensorHelperFactoryBase::create(settings.sensorModel);
if (camHelper_ == nullptr) {
LOG(IPAIPU3, Error)
<< "Failed to create camera sensor helper for "
@@ -314,14 +313,39 @@ int IPAIPU3::init(const IPASettings &settings,
/* Clean context */
context_.configuration = {};
- context_.configuration.sensor.lineDuration = sensorInfo.lineLength * 1.0s / sensorInfo.pixelRate;
+ context_.configuration.sensor.lineDuration = sensorInfo.minLineLength
+ * 1.0s / sensorInfo.pixelRate;
+
+ /* Load the tuning data file. */
+ File file(settings.configurationFile);
+ if (!file.open(File::OpenModeFlag::ReadOnly)) {
+ int ret = file.error();
+ LOG(IPAIPU3, Error)
+ << "Failed to open configuration file "
+ << settings.configurationFile << ": " << strerror(-ret);
+ return ret;
+ }
- /* Construct our Algorithms */
- algorithms_.push_back(std::make_unique<algorithms::Af>());
- algorithms_.push_back(std::make_unique<algorithms::Agc>());
- algorithms_.push_back(std::make_unique<algorithms::Awb>());
- algorithms_.push_back(std::make_unique<algorithms::BlackLevelCorrection>());
- algorithms_.push_back(std::make_unique<algorithms::ToneMapping>());
+ std::unique_ptr<libcamera::YamlObject> data = YamlParser::parse(file);
+ if (!data)
+ return -EINVAL;
+
+ unsigned int version = (*data)["version"].get<uint32_t>(0);
+ if (version != 1) {
+ LOG(IPAIPU3, Error)
+ << "Invalid tuning file version " << version;
+ return -EINVAL;
+ }
+
+ if (!data->contains("algorithms")) {
+ LOG(IPAIPU3, Error)
+ << "Tuning file doesn't contain any algorithm";
+ return -EINVAL;
+ }
+
+ int ret = createAlgorithms(context_, (*data)["algorithms"]);
+ if (ret)
+ return ret;
/* Initialize controls. */
updateControls(sensorInfo, sensorControls, ipaControls);
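For reference, a minimal tuning file consistent with the parsing above carries a
version number and an ordered list of algorithm entries. The file actually
shipped for IPU3 lives under src/ipa/ipu3/data/ and may differ, so treat the
following as an illustrative sketch only:

	version: 1
	algorithms:
	  - Af:
	  - Agc:
	  - Awb:
	  - BlackLevelCorrection:
	  - ToneMapping: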
@@ -348,6 +372,7 @@ int IPAIPU3::start()
*/
void IPAIPU3::stop()
{
+ context_.frameContexts.clear();
}
/**
@@ -446,6 +471,16 @@ int IPAIPU3::configure(const IPAConfigInfo &configInfo,
lensCtrls_ = configInfo.lensControls;
+ /* Clear the IPA context for the new streaming session. */
+ context_.activeState = {};
+ context_.configuration = {};
+ context_.frameContexts.clear();
+
+ /* Initialise the sensor configuration. */
+ context_.configuration.sensor.lineDuration = sensorInfo_.minLineLength
+ * 1.0s / sensorInfo_.pixelRate;
+ context_.configuration.sensor.size = sensorInfo_.outputSize;
+
/*
* Compute the sensor V4L2 controls to be used by the algorithms and
* to be set on the sensor.
@@ -454,23 +489,13 @@ int IPAIPU3::configure(const IPAConfigInfo &configInfo,
calculateBdsGrid(configInfo.bdsOutputSize);
- /* Clean IPAActiveState at each reconfiguration. */
- context_.activeState = {};
- IPAFrameContext initFrameContext;
- context_.frameContexts.fill(initFrameContext);
-
- if (!validateSensorControls()) {
- LOG(IPAIPU3, Error) << "Sensor control validation failed.";
- return -EINVAL;
- }
-
/* Update the camera controls using the new sensor settings. */
updateControls(sensorInfo_, sensorCtrls_, ipaControls);
/* Update the IPASessionConfiguration using the sensor settings. */
updateSessionConfiguration(sensorCtrls_);
- for (auto const &algo : algorithms_) {
+ for (auto const &algo : algorithms()) {
int ret = algo->configure(context_, configInfo);
if (ret)
return ret;
@@ -538,8 +563,10 @@ void IPAIPU3::fillParamsBuffer(const uint32_t frame, const uint32_t bufferId)
*/
params->use = {};
- for (auto const &algo : algorithms_)
- algo->prepare(context_, params);
+ IPAFrameContext &frameContext = context_.frameContexts.get(frame);
+
+ for (auto const &algo : algorithms())
+ algo->prepare(context_, frame, frameContext, params);
paramsBufferReady.emit(frame);
}
@@ -569,33 +596,18 @@ void IPAIPU3::processStatsBuffer(const uint32_t frame,
const ipu3_uapi_stats_3a *stats =
reinterpret_cast<ipu3_uapi_stats_3a *>(mem.data());
- IPAFrameContext &frameContext = context_.frameContexts[frame % kMaxFrameContexts];
-
- if (frameContext.frame != frame)
- LOG(IPAIPU3, Warning) << "Frame " << frame << " does not match its frame context";
+ IPAFrameContext &frameContext = context_.frameContexts.get(frame);
frameContext.sensor.exposure = sensorControls.get(V4L2_CID_EXPOSURE).get<int32_t>();
frameContext.sensor.gain = camHelper_->gain(sensorControls.get(V4L2_CID_ANALOGUE_GAIN).get<int32_t>());
- double lineDuration = context_.configuration.sensor.lineDuration.get<std::micro>();
- int32_t vBlank = context_.configuration.sensor.defVBlank;
- ControlList ctrls(controls::controls);
+ ControlList metadata(controls::controls);
- for (auto const &algo : algorithms_)
- algo->process(context_, &frameContext, stats);
+ for (auto const &algo : algorithms())
+ algo->process(context_, frame, frameContext, stats, metadata);
setControls(frame);
- /* \todo Use VBlank value calculated from each frame exposure. */
- int64_t frameDuration = (vBlank + sensorInfo_.outputSize.height) * lineDuration;
- ctrls.set(controls::FrameDuration, frameDuration);
-
- ctrls.set(controls::AnalogueGain, frameContext.sensor.gain);
-
- ctrls.set(controls::ColourTemperature, context_.activeState.awb.temperatureK);
-
- ctrls.set(controls::ExposureTime, frameContext.sensor.exposure * lineDuration);
-
/*
* \todo The Metadata provides a path to getting extended data
 * out to the application. Further data such as a simplified Histogram
@@ -604,7 +616,7 @@ void IPAIPU3::processStatsBuffer(const uint32_t frame,
* likely want to avoid putting platform specific metadata in.
*/
- metadataReady.emit(frame, ctrls);
+ metadataReady.emit(frame, metadata);
}
/**
@@ -617,8 +629,10 @@ void IPAIPU3::processStatsBuffer(const uint32_t frame,
*/
void IPAIPU3::queueRequest(const uint32_t frame, const ControlList &controls)
{
- /* \todo Start processing for 'frame' based on 'controls'. */
- context_.frameContexts[frame % kMaxFrameContexts] = { frame, controls };
+ IPAFrameContext &frameContext = context_.frameContexts.alloc(frame);
+
+ for (auto const &algo : algorithms())
+ algo->queueRequest(context_, frame, frameContext, controls);
}
/**
@@ -659,7 +673,7 @@ extern "C" {
const struct IPAModuleInfo ipaModuleInfo = {
IPA_MODULE_API_VERSION,
1,
- "PipelineHandlerIPU3",
+ "ipu3",
"ipu3",
};
diff --git a/src/ipa/ipu3/meson.build b/src/ipa/ipu3/meson.build
index 3194111a..66c39843 100644
--- a/src/ipa/ipu3/meson.build
+++ b/src/ipa/ipu3/meson.build
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: CC0-1.0
subdir('algorithms')
+subdir('data')
ipa_name = 'ipa_ipu3'
@@ -28,3 +29,5 @@ if ipa_sign_module
install : false,
build_by_default : true)
endif
+
+ipa_names += ipa_name
diff --git a/src/ipa/ipu3/module.h b/src/ipa/ipu3/module.h
index d94fc459..60f65cc4 100644
--- a/src/ipa/ipu3/module.h
+++ b/src/ipa/ipu3/module.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022, Ideas On Board
*
- * module.h - IPU3 IPA Module
+ * IPU3 IPA Module
*/
#pragma once
diff --git a/src/ipa/libipa/agc_mean_luminance.cpp b/src/ipa/libipa/agc_mean_luminance.cpp
new file mode 100644
index 00000000..271b5ae4
--- /dev/null
+++ b/src/ipa/libipa/agc_mean_luminance.cpp
@@ -0,0 +1,577 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024 Ideas on Board Oy
+ *
+ * Base class for mean luminance AGC algorithms
+ */
+
+#include "agc_mean_luminance.h"
+
+#include <cmath>
+
+#include <libcamera/base/log.h>
+#include <libcamera/control_ids.h>
+
+#include "exposure_mode_helper.h"
+
+using namespace libcamera::controls;
+
+/**
+ * \file agc_mean_luminance.h
+ * \brief Base class implementing mean luminance AEGC
+ */
+
+namespace libcamera {
+
+using namespace std::literals::chrono_literals;
+
+LOG_DEFINE_CATEGORY(AgcMeanLuminance)
+
+namespace ipa {
+
+/*
+ * Number of frames for which to run the algorithm at full speed, before slowing
+ * down to prevent large and jarring changes in exposure from frame to frame.
+ */
+static constexpr uint32_t kNumStartupFrames = 10;
+
+/*
+ * Default relative luminance target
+ *
+ * This value should be chosen so that when the camera points at a grey target,
+ * the resulting image brightness looks "right". Custom values can be passed
+ * as the relativeLuminanceTarget value in sensor tuning files.
+ */
+static constexpr double kDefaultRelativeLuminanceTarget = 0.16;
+
+/**
+ * \struct AgcMeanLuminance::AgcConstraint
+ * \brief The boundaries and target for an AeConstraintMode constraint
+ *
+ * This structure describes an AeConstraintMode constraint for the purposes of
+ * this algorithm. These constraints are expressed as a pair of quantile
+ * boundaries for a histogram, along with a luminance target and a bound type.
+ * The algorithm uses the constraints by ensuring that the defined portion of a
+ * luminance histogram (i.e. the part lying between the two quantiles) is above
+ * or below the given luminance value.
+ */
+
+/**
+ * \enum AgcMeanLuminance::AgcConstraint::Bound
+ * \brief Specify whether the constraint defines a lower or upper bound
+ * \var AgcMeanLuminance::AgcConstraint::lower
+ * \brief The constraint defines a lower bound
+ * \var AgcMeanLuminance::AgcConstraint::upper
+ * \brief The constraint defines an upper bound
+ */
+
+/**
+ * \var AgcMeanLuminance::AgcConstraint::bound
+ * \brief The type of constraint bound
+ */
+
+/**
+ * \var AgcMeanLuminance::AgcConstraint::qLo
+ * \brief The lower quantile to use for the constraint
+ */
+
+/**
+ * \var AgcMeanLuminance::AgcConstraint::qHi
+ * \brief The upper quantile to use for the constraint
+ */
+
+/**
+ * \var AgcMeanLuminance::AgcConstraint::yTarget
+ * \brief The luminance target for the constraint
+ */
+
+/**
+ * \class AgcMeanLuminance
+ * \brief A mean-based auto-exposure algorithm
+ *
+ * This algorithm calculates a shutter time, analogue and digital gain such that
+ * the normalised mean luminance value of an image is driven towards a target,
+ * which itself is discovered from tuning data. The algorithm is a two-stage
+ * process.
+ *
+ * In the first stage, an initial gain value is derived by iteratively comparing
+ * the gain-adjusted mean luminance across the entire image against a target,
+ * and selecting a value which pushes it as closely as possible towards the
+ * target.
+ *
+ * In the second stage we calculate the gain required to drive the average of a
+ * section of a histogram to a target value, where the target and the boundaries
+ * of the section of the histogram used in the calculation are taken from the
+ * values defined for the currently configured AeConstraintMode within the
+ * tuning data. This class provides a helper function to parse those tuning data
+ * to discover the constraints, and so requires a specific format for those
+ * data which is described in \ref parseTuningData(). The gain from the first
+ * stage is then clamped to the gain from this stage.
+ *
+ * The final gain is used to adjust the effective exposure value of the image,
+ * and that new exposure value is divided into shutter time, analogue gain and
+ * digital gain according to the selected AeExposureMode. This class uses the
+ * \ref ExposureModeHelper class to assist in that division, and expects the
+ * data needed to initialise that class to be present in tuning data in a
+ * format described in \ref parseTuningData().
+ *
+ * In order to be able to use this algorithm an IPA module needs to be able to
+ * do the following:
+ *
+ * 1. Provide a luminance estimation across an entire image.
+ * 2. Provide a luminance Histogram for the image to use in calculating
+ * constraint compliance. The precision of the Histogram that is available
+ * will determine the supportable precision of the constraints.
+ *
+ * IPA modules that want to use this class to implement their AEGC algorithm
+ * should derive from it and provide an overriding estimateLuminance() function
+ * for this class to use. They must call parseTuningData() in init(), and must
+ * also call setLimits() and resetFrameCount() in configure(). They may then use
+ * calculateNewEv() in process(). If the limits passed to setLimits() change for
+ * any reason (for example, in response to a FrameDurationLimit control being
+ * passed in queueRequest()) then setLimits() must be called again with the new
+ * values.
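+ *
+ * A minimal sketch of such a derived class follows; the statistics handling
+ * and the constraintMode_ / exposureMode_ / meanLuminance_ members are
+ * illustrative only:
+ *
+ * \code
+ * class MyAgc : public AgcMeanLuminance
+ * {
+ * public:
+ *	int init(const YamlObject &tuningData)
+ *	{
+ *		return parseTuningData(tuningData);
+ *	}
+ *
+ *	void configure(utils::Duration minShutter, utils::Duration maxShutter,
+ *		       double minGain, double maxGain)
+ *	{
+ *		setLimits(minShutter, maxShutter, minGain, maxGain);
+ *		resetFrameCount();
+ *	}
+ *
+ *	void process(const Histogram &yHist, utils::Duration effectiveEv)
+ *	{
+ *		auto [shutter, aGain, dGain] =
+ *			calculateNewEv(constraintMode_, exposureMode_, yHist,
+ *				       effectiveEv);
+ *		// Apply shutter, aGain and dGain to the sensor and ISP.
+ *	}
+ *
+ * private:
+ *	double estimateLuminance(double gain) const override
+ *	{
+ *		// Hardware-specific: gain-adjusted mean luminance from stats.
+ *		return meanLuminance_ * gain;
+ *	}
+ *
+ *	uint32_t constraintMode_ = 0;
+ *	uint32_t exposureMode_ = 0;
+ *	double meanLuminance_ = 0.0;
+ * };
+ * \endcode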
+ */
+
+AgcMeanLuminance::AgcMeanLuminance()
+ : frameCount_(0), filteredExposure_(0s), relativeLuminanceTarget_(0)
+{
+}
+
+AgcMeanLuminance::~AgcMeanLuminance() = default;
+
+void AgcMeanLuminance::parseRelativeLuminanceTarget(const YamlObject &tuningData)
+{
+ relativeLuminanceTarget_ =
+ tuningData["relativeLuminanceTarget"].get<double>(kDefaultRelativeLuminanceTarget);
+}
+
+void AgcMeanLuminance::parseConstraint(const YamlObject &modeDict, int32_t id)
+{
+ for (const auto &[boundName, content] : modeDict.asDict()) {
+ if (boundName != "upper" && boundName != "lower") {
+ LOG(AgcMeanLuminance, Warning)
+ << "Ignoring unknown constraint bound '" << boundName << "'";
+ continue;
+ }
+
+ unsigned int idx = static_cast<unsigned int>(boundName == "upper");
+ AgcConstraint::Bound bound = static_cast<AgcConstraint::Bound>(idx);
+ double qLo = content["qLo"].get<double>().value_or(0.98);
+ double qHi = content["qHi"].get<double>().value_or(1.0);
+ double yTarget =
+ content["yTarget"].getList<double>().value_or(std::vector<double>{ 0.5 }).at(0);
+
+ AgcConstraint constraint = { bound, qLo, qHi, yTarget };
+
+ if (!constraintModes_.count(id))
+ constraintModes_[id] = {};
+
+ if (idx)
+ constraintModes_[id].push_back(constraint);
+ else
+ constraintModes_[id].insert(constraintModes_[id].begin(), constraint);
+ }
+}
+
+int AgcMeanLuminance::parseConstraintModes(const YamlObject &tuningData)
+{
+ std::vector<ControlValue> availableConstraintModes;
+
+ const YamlObject &yamlConstraintModes = tuningData[controls::AeConstraintMode.name()];
+ if (yamlConstraintModes.isDictionary()) {
+ for (const auto &[modeName, modeDict] : yamlConstraintModes.asDict()) {
+ if (AeConstraintModeNameValueMap.find(modeName) ==
+ AeConstraintModeNameValueMap.end()) {
+ LOG(AgcMeanLuminance, Warning)
+ << "Skipping unknown constraint mode '" << modeName << "'";
+ continue;
+ }
+
+ if (!modeDict.isDictionary()) {
+ LOG(AgcMeanLuminance, Error)
+ << "Invalid constraint mode '" << modeName << "'";
+ return -EINVAL;
+ }
+
+ parseConstraint(modeDict,
+ AeConstraintModeNameValueMap.at(modeName));
+ availableConstraintModes.push_back(
+ AeConstraintModeNameValueMap.at(modeName));
+ }
+ }
+
+ /*
+ * If the tuning data file contains no constraints then we use the
+ * default constraint that the IPU3/RkISP1 Agc algorithms were adhering
+ * to anyway before centralisation; this constraint forces the top 2% of
+ * the histogram to be at least 0.5.
+ */
+ if (constraintModes_.empty()) {
+ AgcConstraint constraint = {
+ AgcConstraint::Bound::lower,
+ 0.98,
+ 1.0,
+ 0.5
+ };
+
+ constraintModes_[controls::ConstraintNormal].insert(
+ constraintModes_[controls::ConstraintNormal].begin(),
+ constraint);
+ availableConstraintModes.push_back(
+ AeConstraintModeNameValueMap.at("ConstraintNormal"));
+ }
+
+ controls_[&controls::AeConstraintMode] = ControlInfo(availableConstraintModes);
+
+ return 0;
+}
+
+int AgcMeanLuminance::parseExposureModes(const YamlObject &tuningData)
+{
+ std::vector<ControlValue> availableExposureModes;
+
+ const YamlObject &yamlExposureModes = tuningData[controls::AeExposureMode.name()];
+ if (yamlExposureModes.isDictionary()) {
+ for (const auto &[modeName, modeValues] : yamlExposureModes.asDict()) {
+ if (AeExposureModeNameValueMap.find(modeName) ==
+ AeExposureModeNameValueMap.end()) {
+ LOG(AgcMeanLuminance, Warning)
+ << "Skipping unknown exposure mode '" << modeName << "'";
+ continue;
+ }
+
+ if (!modeValues.isDictionary()) {
+ LOG(AgcMeanLuminance, Error)
+ << "Invalid exposure mode '" << modeName << "'";
+ return -EINVAL;
+ }
+
+ std::vector<uint32_t> shutters =
+ modeValues["shutter"].getList<uint32_t>().value_or(std::vector<uint32_t>{});
+ std::vector<double> gains =
+ modeValues["gain"].getList<double>().value_or(std::vector<double>{});
+
+ if (shutters.size() != gains.size()) {
+ LOG(AgcMeanLuminance, Error)
+ << "Shutter and gain array sizes unequal";
+ return -EINVAL;
+ }
+
+ if (shutters.empty()) {
+ LOG(AgcMeanLuminance, Error)
+ << "Shutter and gain arrays are empty";
+ return -EINVAL;
+ }
+
+ std::vector<std::pair<utils::Duration, double>> stages;
+ for (unsigned int i = 0; i < shutters.size(); i++) {
+ stages.push_back({
+ std::chrono::microseconds(shutters[i]),
+ gains[i]
+ });
+ }
+
+ std::shared_ptr<ExposureModeHelper> helper =
+ std::make_shared<ExposureModeHelper>(stages);
+
+ exposureModeHelpers_[AeExposureModeNameValueMap.at(modeName)] = helper;
+ availableExposureModes.push_back(AeExposureModeNameValueMap.at(modeName));
+ }
+ }
+
+ /*
+ * If we don't have any exposure modes in the tuning data we create an
+ * ExposureModeHelper using an empty vector of stages. This will result
+ * in the ExposureModeHelper simply driving the shutter as high as
+ * possible before touching gain.
+ */
+ if (availableExposureModes.empty()) {
+ int32_t exposureModeId = AeExposureModeNameValueMap.at("ExposureNormal");
+ std::vector<std::pair<utils::Duration, double>> stages = { };
+
+ std::shared_ptr<ExposureModeHelper> helper =
+ std::make_shared<ExposureModeHelper>(stages);
+
+ exposureModeHelpers_[exposureModeId] = helper;
+ availableExposureModes.push_back(exposureModeId);
+ }
+
+ controls_[&controls::AeExposureMode] = ControlInfo(availableExposureModes);
+
+ return 0;
+}
+
+/**
+ * \brief Parse tuning data for AeConstraintMode and AeExposureMode controls
+ * \param[in] tuningData the YamlObject representing the tuning data
+ *
+ * This function parses tuning data to build the list of allowed values for the
+ * AeConstraintMode and AeExposureMode controls. Those tuning data must provide
+ * the data in a specific format; the Agc algorithm's tuning data should contain
+ * a dictionary called AeConstraintMode containing per-mode setting dictionaries
+ * with each key being one of the names from
+ * \ref controls::AeConstraintModeNameValueMap. Each mode dictionary may
+ * contain either a "lower" or "upper" key or both, for
+ * example:
+ *
+ * \code{.unparsed}
+ * algorithms:
+ * - Agc:
+ * AeConstraintMode:
+ * ConstraintNormal:
+ * lower:
+ * qLo: 0.98
+ * qHi: 1.0
+ * yTarget: 0.5
+ * ConstraintHighlight:
+ * lower:
+ * qLo: 0.98
+ * qHi: 1.0
+ * yTarget: 0.5
+ * upper:
+ * qLo: 0.98
+ * qHi: 1.0
+ * yTarget: 0.8
+ *
+ * \endcode
+ *
+ * For the AeExposureMode control the data should contain a dictionary called
+ * AeExposureMode containing per-mode setting dictionaries with each key being
+ * one of the names from \ref controls::AeExposureModeNameValueMap. Each mode
+ * dictionary should
+ * contain an array of shutter times with the key "shutter" and an array of gain
+ * values with the key "gain", in this format:
+ *
+ * \code{.unparsed}
+ * algorithms:
+ * - Agc:
+ * AeExposureMode:
+ * ExposureNormal:
+ * shutter: [ 100, 10000, 30000, 60000, 120000 ]
+ * gain: [ 2.0, 4.0, 6.0, 8.0, 10.0 ]
+ * ExposureShort:
+ * shutter: [ 100, 10000, 30000, 60000, 120000 ]
+ * gain: [ 2.0, 4.0, 6.0, 8.0, 10.0 ]
+ *
+ * \endcode
+ *
+ * \return 0 on success or a negative error code
+ */
+int AgcMeanLuminance::parseTuningData(const YamlObject &tuningData)
+{
+ int ret;
+
+ parseRelativeLuminanceTarget(tuningData);
+
+ ret = parseConstraintModes(tuningData);
+ if (ret)
+ return ret;
+
+ return parseExposureModes(tuningData);
+}
+
+/**
+ * \brief Set the ExposureModeHelper limits for this class
+ * \param[in] minShutter Minimum shutter time to allow
+ * \param[in] maxShutter Maximum shutter time to allow
+ * \param[in] minGain Minimum gain to allow
+ * \param[in] maxGain Maximum gain to allow
+ *
+ * This function calls \ref ExposureModeHelper::setLimits() for each
+ * ExposureModeHelper that has been created for this class.
+ */
+void AgcMeanLuminance::setLimits(utils::Duration minShutter,
+ utils::Duration maxShutter,
+ double minGain, double maxGain)
+{
+ for (auto &[id, helper] : exposureModeHelpers_)
+ helper->setLimits(minShutter, maxShutter, minGain, maxGain);
+}
+
+/**
+ * \fn AgcMeanLuminance::constraintModes()
+ * \brief Get the constraint modes that have been parsed from tuning data
+ */
+
+/**
+ * \fn AgcMeanLuminance::exposureModeHelpers()
+ * \brief Get the ExposureModeHelpers that have been parsed from tuning data
+ */
+
+/**
+ * \fn AgcMeanLuminance::controls()
+ * \brief Get the controls that have been generated after parsing tuning data
+ */
+
+/**
+ * \fn AgcMeanLuminance::estimateLuminance(const double gain)
+ * \brief Estimate the luminance of an image, adjusted by a given gain
+ * \param[in] gain The gain with which to adjust the luminance estimate
+ *
+ * This function estimates the average relative luminance of the frame that
+ * would be output by the sensor if an additional \a gain was applied. It is a
+ * pure virtual function because estimation of luminance is a hardware-specific
+ * operation, which depends wholly on the format of the stats that are delivered
+ * to libcamera from the ISP. Derived classes must override this function with
+ * one that calculates the normalised mean luminance value across the entire
+ * image.
+ *
+ * \return The normalised relative luminance of the image
+ */
+
+/**
+ * \brief Estimate the initial gain needed to achieve a relative luminance
+ * target
+ * \return The calculated initial gain
+ */
+double AgcMeanLuminance::estimateInitialGain() const
+{
+ double yTarget = relativeLuminanceTarget_;
+ double yGain = 1.0;
+
+ /*
+ * To account for non-linearity caused by saturation, the value needs to
+ * be estimated in an iterative process, as multiplying by a gain will
+ * not increase the relative luminance by the same factor if some image
+ * regions are saturated.
+ */
+ for (unsigned int i = 0; i < 8; i++) {
+ double yValue = estimateLuminance(yGain);
+ double extra_gain = std::min(10.0, yTarget / (yValue + .001));
+
+ yGain *= extra_gain;
+ LOG(AgcMeanLuminance, Debug) << "Y value: " << yValue
+ << ", Y target: " << yTarget
+ << ", gives gain " << yGain;
+
+ if (utils::abs_diff(extra_gain, 1.0) < 0.01)
+ break;
+ }
+
+ return yGain;
+}
+
+/**
+ * \brief Clamp gain within the bounds of a defined constraint
+ * \param[in] constraintModeIndex The index of the constraint to adhere to
+ * \param[in] hist A histogram over which to calculate inter-quantile means
+ * \param[in] gain The gain to clamp
+ *
+ * \return The gain clamped within the constraint bounds
+ */
+double AgcMeanLuminance::constraintClampGain(uint32_t constraintModeIndex,
+ const Histogram &hist,
+ double gain)
+{
+ std::vector<AgcConstraint> &constraints = constraintModes_[constraintModeIndex];
+ for (const AgcConstraint &constraint : constraints) {
+ double newGain = constraint.yTarget * hist.bins() /
+ hist.interQuantileMean(constraint.qLo, constraint.qHi);
+
+ if (constraint.bound == AgcConstraint::Bound::lower &&
+ newGain > gain)
+ gain = newGain;
+
+ if (constraint.bound == AgcConstraint::Bound::upper &&
+ newGain < gain)
+ gain = newGain;
+ }
+
+ return gain;
+}
+
+/**
+ * \brief Apply a filter on the exposure value to limit the speed of changes
+ * \param[in] exposureValue The target exposure from the AGC algorithm
+ *
+ * The speed of the filter is adaptive: it converges on the target more quickly
+ * during startup, or when the target exposure is within 20% of the most recent
+ * filter output.
+ *
+ * \return The filtered exposure
+ */
+utils::Duration AgcMeanLuminance::filterExposure(utils::Duration exposureValue)
+{
+ double speed = 0.2;
+
+ /* Adapt instantly if we are in startup phase. */
+ if (frameCount_ < kNumStartupFrames)
+ speed = 1.0;
+
+ /*
+ * If we are close to the desired result, go faster to avoid making
+ * multiple micro-adjustments.
+ * \todo Make this customisable?
+ */
+ if (filteredExposure_ < 1.2 * exposureValue &&
+ filteredExposure_ > 0.8 * exposureValue)
+ speed = sqrt(speed);
+
+ filteredExposure_ = speed * exposureValue +
+ filteredExposure_ * (1.0 - speed);
+
+ return filteredExposure_;
+}
+
+/**
+ * \brief Calculate the new exposure value and split it between shutter time and gain
+ * \param[in] constraintModeIndex The index of the current constraint mode
+ * \param[in] exposureModeIndex The index of the current exposure mode
+ * \param[in] yHist A Histogram from the ISP statistics to use in constraining
+ * the calculated gain
+ * \param[in] effectiveExposureValue The EV applied to the frame from which the
+ * statistics in use derive
+ *
+ * Calculate a new exposure value to try to obtain the target. The calculated
+ * exposure value is filtered to prevent rapid changes from frame to frame, and
+ * divided into shutter time, analogue and digital gain.
+ *
+ * \return Tuple of shutter time, analogue gain, and digital gain
+ */
+std::tuple<utils::Duration, double, double>
+AgcMeanLuminance::calculateNewEv(uint32_t constraintModeIndex,
+ uint32_t exposureModeIndex,
+ const Histogram &yHist,
+ utils::Duration effectiveExposureValue)
+{
+ /*
+ * The pipeline handler should validate that we have received an allowed
+ * value for AeExposureMode.
+ */
+ std::shared_ptr<ExposureModeHelper> exposureModeHelper =
+ exposureModeHelpers_.at(exposureModeIndex);
+
+ double gain = estimateInitialGain();
+ gain = constraintClampGain(constraintModeIndex, yHist, gain);
+
+ /*
+ * We don't check whether we're already close to the target, because
+ * even if the effective exposure value is the same as the last frame's
+ * we could have switched to an exposure mode that would require a new
+ * pass through the splitExposure() function.
+ */
+
+ utils::Duration newExposureValue = effectiveExposureValue * gain;
+
+ /*
+ * We filter the exposure value to make sure changes are not too jarring
+ * from frame to frame.
+ */
+ newExposureValue = filterExposure(newExposureValue);
+
+ frameCount_++;
+ return exposureModeHelper->splitExposure(newExposureValue);
+}
+
+/**
+ * \fn AgcMeanLuminance::resetFrameCount()
+ * \brief Reset the frame counter
+ *
+ * This function resets the internal frame counter, which exists to help the
+ * algorithm decide whether it should respond instantly or not. Derived classes
+ * are expected to call this function from their configure() function, before
+ * each camera start.
+ */
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/agc_mean_luminance.h b/src/ipa/libipa/agc_mean_luminance.h
new file mode 100644
index 00000000..0a81c6d2
--- /dev/null
+++ b/src/ipa/libipa/agc_mean_luminance.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024 Ideas on Board Oy
+ *
+ * Base class for mean luminance AGC algorithms
+ */
+
+#pragma once
+
+#include <map>
+#include <memory>
+#include <tuple>
+#include <vector>
+
+#include <libcamera/controls.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+#include "exposure_mode_helper.h"
+#include "histogram.h"
+
+namespace libcamera {
+
+namespace ipa {
+
+class AgcMeanLuminance
+{
+public:
+ AgcMeanLuminance();
+ virtual ~AgcMeanLuminance();
+
+ struct AgcConstraint {
+ enum class Bound {
+ lower = 0,
+ upper = 1
+ };
+ Bound bound;
+ double qLo;
+ double qHi;
+ double yTarget;
+ };
+
+ int parseTuningData(const YamlObject &tuningData);
+
+ void setLimits(utils::Duration minShutter, utils::Duration maxShutter,
+ double minGain, double maxGain);
+
+ std::map<int32_t, std::vector<AgcConstraint>> constraintModes()
+ {
+ return constraintModes_;
+ }
+
+ std::map<int32_t, std::shared_ptr<ExposureModeHelper>> exposureModeHelpers()
+ {
+ return exposureModeHelpers_;
+ }
+
+ ControlInfoMap::Map controls()
+ {
+ return controls_;
+ }
+
+ std::tuple<utils::Duration, double, double>
+ calculateNewEv(uint32_t constraintModeIndex, uint32_t exposureModeIndex,
+ const Histogram &yHist, utils::Duration effectiveExposureValue);
+
+ void resetFrameCount()
+ {
+ frameCount_ = 0;
+ }
+
+private:
+ virtual double estimateLuminance(const double gain) const = 0;
+
+ void parseRelativeLuminanceTarget(const YamlObject &tuningData);
+ void parseConstraint(const YamlObject &modeDict, int32_t id);
+ int parseConstraintModes(const YamlObject &tuningData);
+ int parseExposureModes(const YamlObject &tuningData);
+ double estimateInitialGain() const;
+ double constraintClampGain(uint32_t constraintModeIndex,
+ const Histogram &hist,
+ double gain);
+ utils::Duration filterExposure(utils::Duration exposureValue);
+
+ uint64_t frameCount_;
+ utils::Duration filteredExposure_;
+ double relativeLuminanceTarget_;
+
+ std::map<int32_t, std::vector<AgcConstraint>> constraintModes_;
+ std::map<int32_t, std::shared_ptr<ExposureModeHelper>> exposureModeHelpers_;
+ ControlInfoMap::Map controls_;
+};
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/algorithm.cpp b/src/ipa/libipa/algorithm.cpp
index 8549fe3f..201efdfd 100644
--- a/src/ipa/libipa/algorithm.cpp
+++ b/src/ipa/libipa/algorithm.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Ideas On Board
*
- * algorithm.cpp - IPA control algorithm interface
+ * IPA control algorithm interface
*/
#include "algorithm.h"
@@ -67,10 +67,29 @@ namespace ipa {
*/
/**
+ * \fn Algorithm::queueRequest()
+ * \brief Provide control values to the algorithm
+ * \param[in] context The shared IPA context
+ * \param[in] frame The frame number to apply the control values
+ * \param[in] frameContext The current frame's context
+ * \param[in] controls The list of user controls
+ *
+ * This function is called for each request queued to the camera. It provides
+ * the controls stored in the request to the algorithm. The \a frame number
+ * is the Request sequence number and identifies the frame on which the
+ * controls shall take effect.
+ *
+ * Algorithms shall read the applicable controls and store their value for later
+ * use during frame processing.
+ */
+
+/**
* \fn Algorithm::prepare()
* \brief Fill the \a params buffer with ISP processing parameters for a frame
* \param[in] context The shared IPA context
- * \param[out] params The ISP specific parameters.
+ * \param[in] frame The frame context sequence number
+ * \param[in] frameContext The FrameContext for this frame
+ * \param[out] params The ISP specific parameters
*
* This function is called for every frame when the camera is running before it
* is processed by the ISP to prepare the ISP processing parameters for that
@@ -85,13 +104,15 @@ namespace ipa {
* \fn Algorithm::process()
* \brief Process ISP statistics, and run algorithm operations
* \param[in] context The shared IPA context
+ * \param[in] frame The frame context sequence number
* \param[in] frameContext The current frame's context
* \param[in] stats The IPA statistics and ISP results
+ * \param[out] metadata Metadata for the frame, to be filled by the algorithm
*
 * This function is called while the camera is running, for every frame processed by
* the ISP, to process statistics generated from that frame by the ISP.
- * Algorithms shall use this data to run calculations and update their state
- * accordingly.
+ * Algorithms shall use this data to run calculations, update their state
+ * accordingly, and fill the frame metadata.
*
* Processing shall not take an undue amount of time, and any extended or
* computationally expensive calculations or operations must be handled
diff --git a/src/ipa/libipa/algorithm.h b/src/ipa/libipa/algorithm.h
index 2a8871d8..9a19dbd6 100644
--- a/src/ipa/libipa/algorithm.h
+++ b/src/ipa/libipa/algorithm.h
@@ -2,13 +2,16 @@
/*
* Copyright (C) 2021, Ideas On Board
*
- * algorithm.h - ISP control algorithm interface
+ * ISP control algorithm interface
*/
#pragma once
#include <memory>
+#include <stdint.h>
#include <string>
+#include <libcamera/controls.h>
+
namespace libcamera {
class YamlObject;
@@ -35,14 +38,25 @@ public:
return 0;
}
+ virtual void queueRequest([[maybe_unused]] typename Module::Context &context,
+ [[maybe_unused]] const uint32_t frame,
+ [[maybe_unused]] typename Module::FrameContext &frameContext,
+ [[maybe_unused]] const ControlList &controls)
+ {
+ }
+
virtual void prepare([[maybe_unused]] typename Module::Context &context,
+ [[maybe_unused]] const uint32_t frame,
+ [[maybe_unused]] typename Module::FrameContext &frameContext,
[[maybe_unused]] typename Module::Params *params)
{
}
virtual void process([[maybe_unused]] typename Module::Context &context,
- [[maybe_unused]] typename Module::FrameContext *frameContext,
- [[maybe_unused]] const typename Module::Stats *stats)
+ [[maybe_unused]] const uint32_t frame,
+ [[maybe_unused]] typename Module::FrameContext &frameContext,
+ [[maybe_unused]] const typename Module::Stats *stats,
+ [[maybe_unused]] ControlList &metadata)
{
}
};
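Taken together, a module-specific algorithm now overrides whichever of these
hooks it needs. A sketch using the IPU3 types shown elsewhere in this diff,
where Algorithm stands for the module's Algorithm<Module> alias; the class name
and hook bodies are illustrative, not an existing algorithm:

	class MyAlgorithm : public Algorithm
	{
	public:
		void queueRequest(IPAContext &context, const uint32_t frame,
				  IPAFrameContext &frameContext,
				  const ControlList &controls) override
		{
			/* Cache the relevant request controls in frameContext. */
		}

		void prepare(IPAContext &context, const uint32_t frame,
			     IPAFrameContext &frameContext,
			     ipu3_uapi_params *params) override
		{
			/* Fill the ISP parameters for this frame. */
		}

		void process(IPAContext &context, const uint32_t frame,
			     IPAFrameContext &frameContext,
			     const ipu3_uapi_stats_3a *stats,
			     ControlList &metadata) override
		{
			/* Consume the ISP statistics and populate metadata. */
		}
	};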
diff --git a/src/ipa/libipa/camera_sensor_helper.cpp b/src/ipa/libipa/camera_sensor_helper.cpp
index d4dba497..2cd61fcc 100644
--- a/src/ipa/libipa/camera_sensor_helper.cpp
+++ b/src/ipa/libipa/camera_sensor_helper.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * camera_sensor_helper.cpp - Helper class that performs sensor-specific
+ * Helper class that performs sensor-specific
* parameter computations
*/
#include "camera_sensor_helper.h"
@@ -43,7 +43,8 @@ namespace ipa {
* \brief Construct a CameraSensorHelper instance
*
* CameraSensorHelper derived class instances shall never be constructed
- * manually but always through the CameraSensorHelperFactory::create() function.
+ * manually but always through the CameraSensorHelperFactoryBase::create()
+ * function.
*/
/**
@@ -217,27 +218,25 @@ double CameraSensorHelper::gain(uint32_t gainCode) const
*/
/**
- * \class CameraSensorHelperFactory
- * \brief Registration of CameraSensorHelperFactory classes and creation of instances
+ * \class CameraSensorHelperFactoryBase
+ * \brief Base class for camera sensor helper factories
*
- * To facilitate discovery and instantiation of CameraSensorHelper classes, the
- * CameraSensorHelperFactory class maintains a registry of camera sensor helper
- * sub-classes. Each CameraSensorHelper subclass shall register itself using the
- * REGISTER_CAMERA_SENSOR_HELPER() macro, which will create a corresponding
- * instance of a CameraSensorHelperFactory subclass and register it with the
- * static list of factories.
+ * The CameraSensorHelperFactoryBase class is the base of all specializations of
+ * the CameraSensorHelperFactory class template. It implements the factory
+ * registration, maintains a registry of factories, and provides access to the
+ * registered factories.
*/
/**
- * \brief Construct a camera sensor helper factory
+ * \brief Construct a camera sensor helper factory base
* \param[in] name Name of the camera sensor helper class
*
- * Creating an instance of the factory registers it with the global list of
+ * Creating an instance of the factory base registers it with the global list of
* factories, accessible through the factories() function.
*
- * The factory \a name is used for debug purpose and shall be unique.
+ * The factory \a name is used to look up factories and shall be unique.
*/
-CameraSensorHelperFactory::CameraSensorHelperFactory(const std::string name)
+CameraSensorHelperFactoryBase::CameraSensorHelperFactoryBase(const std::string name)
: name_(name)
{
registerType(this);
@@ -252,17 +251,16 @@ CameraSensorHelperFactory::CameraSensorHelperFactory(const std::string name)
* corresponding to the named factory or a null pointer if no such factory
* exists
*/
-std::unique_ptr<CameraSensorHelper> CameraSensorHelperFactory::create(const std::string &name)
+std::unique_ptr<CameraSensorHelper> CameraSensorHelperFactoryBase::create(const std::string &name)
{
- std::vector<CameraSensorHelperFactory *> &factories =
- CameraSensorHelperFactory::factories();
+ const std::vector<CameraSensorHelperFactoryBase *> &factories =
+ CameraSensorHelperFactoryBase::factories();
- for (CameraSensorHelperFactory *factory : factories) {
+ for (const CameraSensorHelperFactoryBase *factory : factories) {
if (name != factory->name_)
continue;
- CameraSensorHelper *helper = factory->createInstance();
- return std::unique_ptr<CameraSensorHelper>(helper);
+ return factory->createInstance();
}
return nullptr;
@@ -275,10 +273,10 @@ std::unique_ptr<CameraSensorHelper> CameraSensorHelperFactory::create(const std:
* The caller is responsible to guarantee the uniqueness of the camera sensor
* helper name.
*/
-void CameraSensorHelperFactory::registerType(CameraSensorHelperFactory *factory)
+void CameraSensorHelperFactoryBase::registerType(CameraSensorHelperFactoryBase *factory)
{
- std::vector<CameraSensorHelperFactory *> &factories =
- CameraSensorHelperFactory::factories();
+ std::vector<CameraSensorHelperFactoryBase *> &factories =
+ CameraSensorHelperFactoryBase::factories();
factories.push_back(factory);
}
@@ -287,33 +285,49 @@ void CameraSensorHelperFactory::registerType(CameraSensorHelperFactory *factory)
* \brief Retrieve the list of all camera sensor helper factories
* \return The list of camera sensor helper factories
*/
-std::vector<CameraSensorHelperFactory *> &CameraSensorHelperFactory::factories()
+std::vector<CameraSensorHelperFactoryBase *> &CameraSensorHelperFactoryBase::factories()
{
/*
* The static factories map is defined inside the function to ensure
* it gets initialized on first use, without any dependency on link
* order.
*/
- static std::vector<CameraSensorHelperFactory *> factories;
+ static std::vector<CameraSensorHelperFactoryBase *> factories;
return factories;
}
/**
- * \fn CameraSensorHelperFactory::createInstance()
- * \brief Create an instance of the CameraSensorHelper corresponding to the
- * factory
+ * \class CameraSensorHelperFactory
+ * \brief Registration of CameraSensorHelperFactory classes and creation of instances
+ * \tparam _Helper The camera sensor helper class type for this factory
*
- * This virtual function is implemented by the REGISTER_CAMERA_SENSOR_HELPER()
- * macro. It creates a camera sensor helper instance associated with the camera
- * sensor model.
+ * To facilitate discovery and instantiation of CameraSensorHelper classes, the
+ * CameraSensorHelperFactory class implements auto-registration of camera sensor
+ * helpers. Each CameraSensorHelper subclass shall register itself using the
+ * REGISTER_CAMERA_SENSOR_HELPER() macro, which will create a corresponding
+ * instance of a CameraSensorHelperFactory subclass and register it with the
+ * static list of factories.
+ */
+
+/**
+ * \fn CameraSensorHelperFactory::CameraSensorHelperFactory(const char *name)
+ * \brief Construct a camera sensor helper factory
+ * \param[in] name Name of the camera sensor helper class
*
- * \return A pointer to a newly constructed instance of the CameraSensorHelper
- * subclass corresponding to the factory
+ * Creating an instance of the factory registers it with the global list of
+ * factories, accessible through the CameraSensorHelperFactoryBase::factories()
+ * function.
+ *
+ * The factory \a name is used to look up factories and shall be unique.
*/
/**
- * \var CameraSensorHelperFactory::name_
- * \brief The name of the factory
+ * \fn CameraSensorHelperFactory::createInstance() const
+ * \brief Create an instance of the CameraSensorHelper corresponding to the
+ * factory
+ *
+ * \return A unique pointer to a newly constructed instance of the
+ * CameraSensorHelper subclass corresponding to the factory
*/
/**
@@ -352,6 +366,35 @@ static constexpr double expGainDb(double step)
return log2_10 * step / 20;
}
+class CameraSensorHelperAr0521 : public CameraSensorHelper
+{
+public:
+ uint32_t gainCode(double gain) const override;
+ double gain(uint32_t gainCode) const override;
+
+private:
+ static constexpr double kStep_ = 16;
+};
+
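+/*
+ * The gain code packs a 4-bit coarse (power-of-two) field and a 4-bit fine
+ * field in 1/16 steps: code = (coarse << 4) | fine. As a worked example
+ * derived from the conversions below (not quoted from a datasheet), a gain of
+ * 4.5 yields coarse = 2 and fine = 2, i.e. code 0x22, and decoding 0x22 gives
+ * (1 << 2) * (1 + 2 / 16.0) = 4.5.
+ */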
+uint32_t CameraSensorHelperAr0521::gainCode(double gain) const
+{
+ gain = std::clamp(gain, 1.0, 15.5);
+ unsigned int coarse = std::log2(gain);
+ unsigned int fine = (gain / (1 << coarse) - 1) * kStep_;
+
+ return (coarse << 4) | (fine & 0xf);
+}
+
+double CameraSensorHelperAr0521::gain(uint32_t gainCode) const
+{
+ unsigned int coarse = gainCode >> 4;
+ unsigned int fine = gainCode & 0xf;
+
+ return (1 << coarse) * (1 + fine / kStep_);
+}
+
+REGISTER_CAMERA_SENSOR_HELPER("ar0521", CameraSensorHelperAr0521)
+
class CameraSensorHelperImx219 : public CameraSensorHelper
{
public:
@@ -374,6 +417,17 @@ public:
};
REGISTER_CAMERA_SENSOR_HELPER("imx258", CameraSensorHelperImx258)
+class CameraSensorHelperImx283 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperImx283()
+ {
+ gainType_ = AnalogueGainLinear;
+ gainConstants_.linear = { 0, 2048, -1, 2048 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("imx283", CameraSensorHelperImx283)
+
class CameraSensorHelperImx290 : public CameraSensorHelper
{
public:
@@ -396,6 +450,33 @@ public:
};
REGISTER_CAMERA_SENSOR_HELPER("imx296", CameraSensorHelperImx296)
+class CameraSensorHelperImx327 : public CameraSensorHelperImx290
+{
+};
+REGISTER_CAMERA_SENSOR_HELPER("imx327", CameraSensorHelperImx327)
+
+class CameraSensorHelperImx335 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperImx335()
+ {
+ gainType_ = AnalogueGainExponential;
+ gainConstants_.exp = { 1.0, expGainDb(0.3) };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("imx335", CameraSensorHelperImx335)
+
+class CameraSensorHelperImx415 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperImx415()
+ {
+ gainType_ = AnalogueGainExponential;
+ gainConstants_.exp = { 1.0, expGainDb(0.3) };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("imx415", CameraSensorHelperImx415)
+
class CameraSensorHelperImx477 : public CameraSensorHelper
{
public:
@@ -407,6 +488,21 @@ public:
};
REGISTER_CAMERA_SENSOR_HELPER("imx477", CameraSensorHelperImx477)
+class CameraSensorHelperOv2685 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperOv2685()
+ {
+ /*
+ * The Sensor Manual doesn't appear to document the gain model.
+ * This has been validated with some empirical testing only.
+ */
+ gainType_ = AnalogueGainLinear;
+ gainConstants_.linear = { 1, 0, 0, 128 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("ov2685", CameraSensorHelperOv2685)
+
class CameraSensorHelperOv2740 : public CameraSensorHelper
{
public:
@@ -418,6 +514,17 @@ public:
};
REGISTER_CAMERA_SENSOR_HELPER("ov2740", CameraSensorHelperOv2740)
+class CameraSensorHelperOv4689 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperOv4689()
+ {
+ gainType_ = AnalogueGainLinear;
+ gainConstants_.linear = { 1, 0, 0, 128 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("ov4689", CameraSensorHelperOv4689)
+
class CameraSensorHelperOv5640 : public CameraSensorHelper
{
public:
@@ -429,6 +536,17 @@ public:
};
REGISTER_CAMERA_SENSOR_HELPER("ov5640", CameraSensorHelperOv5640)
+class CameraSensorHelperOv5647 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperOv5647()
+ {
+ gainType_ = AnalogueGainLinear;
+ gainConstants_.linear = { 1, 0, 0, 16 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("ov5647", CameraSensorHelperOv5647)
+
class CameraSensorHelperOv5670 : public CameraSensorHelper
{
public:
@@ -462,6 +580,35 @@ public:
};
REGISTER_CAMERA_SENSOR_HELPER("ov5693", CameraSensorHelperOv5693)
+class CameraSensorHelperOv64a40 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperOv64a40()
+ {
+ gainType_ = AnalogueGainLinear;
+ gainConstants_.linear = { 1, 0, 0, 128 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("ov64a40", CameraSensorHelperOv64a40)
+
+class CameraSensorHelperOv8858 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperOv8858()
+ {
+ gainType_ = AnalogueGainLinear;
+
+ /*
+ * \todo Validate the selected 1/128 step value as it differs
+ * from what the sensor manual describes.
+ *
+ * See: https://patchwork.linuxtv.org/project/linux-media/patch/20221106171129.166892-2-nicholas@rothemail.net/#142267
+ */
+ gainConstants_.linear = { 1, 0, 0, 128 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("ov8858", CameraSensorHelperOv8858)
+
class CameraSensorHelperOv8865 : public CameraSensorHelper
{
public:
diff --git a/src/ipa/libipa/camera_sensor_helper.h b/src/ipa/libipa/camera_sensor_helper.h
index 7351fc7c..0d99073b 100644
--- a/src/ipa/libipa/camera_sensor_helper.h
+++ b/src/ipa/libipa/camera_sensor_helper.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * camera_sensor_helper.h - Helper class that performs sensor-specific parameter computations
+ * Helper class that performs sensor-specific parameter computations
*/
#pragma once
@@ -58,39 +58,44 @@ private:
LIBCAMERA_DISABLE_COPY_AND_MOVE(CameraSensorHelper)
};
-class CameraSensorHelperFactory
+class CameraSensorHelperFactoryBase
{
public:
- CameraSensorHelperFactory(const std::string name);
- virtual ~CameraSensorHelperFactory() = default;
+ CameraSensorHelperFactoryBase(const std::string name);
+ virtual ~CameraSensorHelperFactoryBase() = default;
static std::unique_ptr<CameraSensorHelper> create(const std::string &name);
- static void registerType(CameraSensorHelperFactory *factory);
- static std::vector<CameraSensorHelperFactory *> &factories();
-
-protected:
- virtual CameraSensorHelper *createInstance() = 0;
+ static std::vector<CameraSensorHelperFactoryBase *> &factories();
private:
- LIBCAMERA_DISABLE_COPY_AND_MOVE(CameraSensorHelperFactory)
+ LIBCAMERA_DISABLE_COPY_AND_MOVE(CameraSensorHelperFactoryBase)
+
+ static void registerType(CameraSensorHelperFactoryBase *factory);
+
+ virtual std::unique_ptr<CameraSensorHelper> createInstance() const = 0;
std::string name_;
};
-#define REGISTER_CAMERA_SENSOR_HELPER(name, helper) \
-class helper##Factory final : public CameraSensorHelperFactory \
-{ \
-public: \
- helper##Factory() : CameraSensorHelperFactory(name) {} \
- \
-private: \
- CameraSensorHelper *createInstance() \
- { \
- return new helper(); \
- } \
-}; \
-static helper##Factory global_##helper##Factory;
+template<typename _Helper>
+class CameraSensorHelperFactory final : public CameraSensorHelperFactoryBase
+{
+public:
+ CameraSensorHelperFactory(const char *name)
+ : CameraSensorHelperFactoryBase(name)
+ {
+ }
+
+private:
+ std::unique_ptr<CameraSensorHelper> createInstance() const override
+ {
+ return std::make_unique<_Helper>();
+ }
+};
+
+#define REGISTER_CAMERA_SENSOR_HELPER(name, helper) \
+static CameraSensorHelperFactory<helper> global_##helper##Factory(name);
} /* namespace ipa */
diff --git a/src/ipa/libipa/exposure_mode_helper.cpp b/src/ipa/libipa/exposure_mode_helper.cpp
new file mode 100644
index 00000000..683a564a
--- /dev/null
+++ b/src/ipa/libipa/exposure_mode_helper.cpp
@@ -0,0 +1,246 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Helper class that performs computations relating to exposure
+ */
+#include "exposure_mode_helper.h"
+
+#include <algorithm>
+
+#include <libcamera/base/log.h>
+
+/**
+ * \file exposure_mode_helper.h
+ * \brief Helper class that performs computations relating to exposure
+ *
+ * AEGC algorithms need to split exposure between shutter time, analogue gain
+ * and digital gain. Multiple implementations do so based on paired stages of
+ * shutter time and gain limits; this helper centralises that code so it does
+ * not have to be duplicated in each IPA module.
+ */
+
+namespace libcamera {
+
+using namespace std::literals::chrono_literals;
+
+LOG_DEFINE_CATEGORY(ExposureModeHelper)
+
+namespace ipa {
+
+/**
+ * \class ExposureModeHelper
+ * \brief Class for splitting exposure into shutter time and total gain
+ *
+ * The ExposureModeHelper class provides a standard interface through which an
+ * AEGC algorithm can divide exposure between shutter time and gain. It is
+ * configured with a set of shutter time and gain pairs and works by initially
+ * fixing gain at 1.0 and increasing shutter time up to the shutter time value
+ * from the first pair in the set in an attempt to meet the required exposure
+ * value.
+ *
+ * If the required exposure is not achievable by the first shutter time value
+ * alone it ramps gain up to the value from the first pair in the set. If the
+ * required exposure is still not met it then allows shutter time to ramp up to
+ * the shutter time value from the second pair in the set, and continues in this
+ * vein until either the required exposure time is met, or else the hardware's
+ * shutter time or gain limits are reached.
+ *
+ * This method allows users to strike a balance between a well-exposed image and
+ * an acceptable frame-rate, as opposed to simply maximising shutter time
+ * followed by gain. The same helpers can be used to perform the latter
+ * operation if needed by passing an empty set of pairs to the initialisation
+ * function.
+ *
+ * The gain values may exceed a camera sensor's analogue gain limits if either
+ * it or the IPA is also capable of digital gain. The setLimits() function must
+ * be called with the hardware's limits to inform the helper of those
+ * constraints. Any gain that is needed will be applied as analogue gain first
+ * until the hardware's limit is reached, following which digital gain will be
+ * used.
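+ *
+ * A minimal usage sketch (the stage and limit values below are illustrative
+ * only):
+ *
+ * \code
+ * using namespace std::literals::chrono_literals;
+ *
+ * std::vector<std::pair<utils::Duration, double>> stages = {
+ *	{ 10ms, 2.0 },
+ *	{ 30ms, 4.0 },
+ * };
+ * ExposureModeHelper helper(stages);
+ *
+ * // Hardware limits, typically taken from the sensor controls.
+ * helper.setLimits(100us, 60ms, 1.0, 8.0);
+ *
+ * auto [shutter, analogueGain, digitalGain] = helper.splitExposure(50ms);
+ * // shutter == 25ms, analogueGain == 2.0, digitalGain == 1.0
+ * \endcode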
+ */
+
+/**
+ * \brief Construct an ExposureModeHelper instance
+ * \param[in] stages The vector of paired shutter time and gain limits
+ *
+ * The input stages are shutter time and _total_ gain pairs; the gain
+ * encompasses both analogue and digital gain.
+ *
+ * The vector of stages may be empty. In that case, the helper will simply use
+ * the runtime limits set through setLimits() instead.
+ */
+ExposureModeHelper::ExposureModeHelper(const Span<std::pair<utils::Duration, double>> stages)
+{
+ minShutter_ = 0us;
+ maxShutter_ = 0us;
+ minGain_ = 0;
+ maxGain_ = 0;
+
+ for (const auto &[s, g] : stages) {
+ shutters_.push_back(s);
+ gains_.push_back(g);
+ }
+}
+
+/**
+ * \brief Set the shutter time and gain limits
+ * \param[in] minShutter The minimum shutter time supported
+ * \param[in] maxShutter The maximum shutter time supported
+ * \param[in] minGain The minimum analogue gain supported
+ * \param[in] maxGain The maximum analogue gain supported
+ *
+ * This function configures the shutter time and analogue gain limits that need
+ * to be adhered to as the helper divides up exposure. Note that this function
+ * *must* be called whenever those limits change and before splitExposure() is
+ * used.
+ *
+ * If the algorithm using the helpers needs to indicate that either shutter time
+ * or analogue gain or both should be fixed it can do so by setting both the
+ * minima and maxima to the same value.
+ */
+void ExposureModeHelper::setLimits(utils::Duration minShutter,
+ utils::Duration maxShutter,
+ double minGain, double maxGain)
+{
+ minShutter_ = minShutter;
+ maxShutter_ = maxShutter;
+ minGain_ = minGain;
+ maxGain_ = maxGain;
+}
+
+utils::Duration ExposureModeHelper::clampShutter(utils::Duration shutter) const
+{
+ return std::clamp(shutter, minShutter_, maxShutter_);
+}
+
+double ExposureModeHelper::clampGain(double gain) const
+{
+ return std::clamp(gain, minGain_, maxGain_);
+}
+
+/**
+ * \brief Split exposure time into shutter time and gain
+ * \param[in] exposure Exposure time
+ *
+ * This function divides a given exposure time into shutter time, analogue and
+ * digital gain by iterating through stages of shutter time and gain limits. At
+ * each stage the current stage's shutter time limit is multiplied by the
+ * previous stage's gain limit (or 1.0 initially) to see if the combination of
+ * the two can meet the required exposure time. If they cannot then the current
+ * stage's shutter time limit is multiplied by the same stage's gain limit to
+ * see if that combination can meet the required exposure time. If they cannot
+ * then the function moves to consider the next stage.
+ *
+ * When a combination of shutter time and gain _stage_ limits are found that are
+ * sufficient to meet the required exposure time, the function attempts to
+ * reduce shutter time as much as possible whilst fixing gain and still meeting
+ * the exposure time. If a _runtime_ limit prevents shutter time from being
+ * lowered enough to meet the exposure time with gain fixed at the stage limit,
+ * gain is also lowered to compensate.
+ *
+ * Once the shutter time and gain values are ascertained, gain is assigned as
+ * analogue gain as much as possible, with digital gain only in use if the
+ * maximum analogue gain runtime limit is unable to accommodate the exposure
+ * value.
+ *
+ * If no combination of shutter time and gain limits is found that meets the
+ * required exposure time, the helper falls back to simply maximising the
+ * shutter time first, followed by analogue gain, followed by digital gain.
+ *
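+ * As a worked sketch, assume hypothetical stages of (10ms, 2.0) and
+ * (30ms, 4.0), runtime limits wide enough not to interfere, and a requested
+ * exposure time of 40ms. The first stage cannot meet it: 10ms * 1.0 and
+ * 10ms * 2.0 both fall short. The second stage's shutter limit combined with
+ * the first stage's gain limit can: 30ms * 2.0 = 60ms >= 40ms. Shutter time is
+ * therefore reduced to 40ms / 2.0 = 20ms, gain becomes 40ms / 20ms = 2.0, and
+ * the returned values are 20ms shutter time, 2.0 analogue gain and 1.0 digital
+ * gain.
+ *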
+ * \return Tuple of shutter time, analogue gain, and digital gain
+ */
+std::tuple<utils::Duration, double, double>
+ExposureModeHelper::splitExposure(utils::Duration exposure) const
+{
+ ASSERT(maxShutter_);
+ ASSERT(maxGain_);
+
+ bool gainFixed = minGain_ == maxGain_;
+ bool shutterFixed = minShutter_ == maxShutter_;
+
+ /*
+	 * There's no point entering the loop if we cannot change either gain
+	 * or shutter anyway.
+ */
+ if (shutterFixed && gainFixed)
+ return { minShutter_, minGain_, exposure / (minShutter_ * minGain_) };
+
+ utils::Duration shutter;
+ double stageGain;
+ double gain;
+
+ for (unsigned int stage = 0; stage < gains_.size(); stage++) {
+ double lastStageGain = stage == 0 ? 1.0 : clampGain(gains_[stage - 1]);
+ utils::Duration stageShutter = clampShutter(shutters_[stage]);
+ stageGain = clampGain(gains_[stage]);
+
+ /*
+ * We perform the clamping on both shutter and gain in case the
+	 * helper has had limits set that prevent those values from being
+	 * lowered below a certain minimum; this can happen at runtime for
+	 * various reasons and so would not be known when the stage limits
+	 * are initialised.
+ */
+
+ if (stageShutter * lastStageGain >= exposure) {
+ shutter = clampShutter(exposure / clampGain(lastStageGain));
+ gain = clampGain(exposure / shutter);
+
+ return { shutter, gain, exposure / (shutter * gain) };
+ }
+
+ if (stageShutter * stageGain >= exposure) {
+ shutter = clampShutter(exposure / clampGain(stageGain));
+ gain = clampGain(exposure / shutter);
+
+ return { shutter, gain, exposure / (shutter * gain) };
+ }
+ }
+
+ /*
+ * From here on all we can do is max out the shutter time, followed by
+	 * the analogue gain. If we still haven't achieved the target, we send
+	 * the rest of the exposure time to digital gain. If we were given no
+	 * stages to use, set stageGain to 1.0 so that shutter time is maxed out
+	 * before gain is touched at all.
+ */
+ if (gains_.empty())
+ stageGain = 1.0;
+
+ shutter = clampShutter(exposure / clampGain(stageGain));
+ gain = clampGain(exposure / shutter);
+
+ return { shutter, gain, exposure / (shutter * gain) };
+}
+
+/**
+ * \fn ExposureModeHelper::minShutter()
+ * \brief Retrieve the configured minimum shutter time limit set through
+ * setLimits()
+ * \return The minShutter_ value
+ */
+
+/**
+ * \fn ExposureModeHelper::maxShutter()
+ * \brief Retrieve the configured maximum shutter time set through
+ * setLimits()
+ * \return The maxShutter_ value
+ */
+
+/**
+ * \fn ExposureModeHelper::minGain()
+ * \brief Retrieve the configured minimum gain set through
+ * setLimits()
+ * \return The minGain_ value
+ */
+
+/**
+ * \fn ExposureModeHelper::maxGain()
+ * \brief Retrieve the configured maximum gain set through
+ * setLimits()
+ * \return The maxGain_ value
+ */
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/exposure_mode_helper.h b/src/ipa/libipa/exposure_mode_helper.h
new file mode 100644
index 00000000..85c665d7
--- /dev/null
+++ b/src/ipa/libipa/exposure_mode_helper.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Helper class that performs computations relating to exposure
+ */
+
+#pragma once
+
+#include <tuple>
+#include <utility>
+#include <vector>
+
+#include <libcamera/base/span.h>
+#include <libcamera/base/utils.h>
+
+namespace libcamera {
+
+namespace ipa {
+
+class ExposureModeHelper
+{
+public:
+ ExposureModeHelper(const Span<std::pair<utils::Duration, double>> stages);
+ ~ExposureModeHelper() = default;
+
+ void setLimits(utils::Duration minShutter, utils::Duration maxShutter,
+ double minGain, double maxGain);
+
+ std::tuple<utils::Duration, double, double>
+ splitExposure(utils::Duration exposure) const;
+
+ utils::Duration minShutter() const { return minShutter_; }
+ utils::Duration maxShutter() const { return maxShutter_; }
+ double minGain() const { return minGain_; }
+ double maxGain() const { return maxGain_; }
+
+private:
+ utils::Duration clampShutter(utils::Duration shutter) const;
+ double clampGain(double gain) const;
+
+ std::vector<utils::Duration> shutters_;
+ std::vector<double> gains_;
+
+ utils::Duration minShutter_;
+ utils::Duration maxShutter_;
+ double minGain_;
+ double maxGain_;
+};
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/fc_queue.cpp b/src/ipa/libipa/fc_queue.cpp
new file mode 100644
index 00000000..0365e919
--- /dev/null
+++ b/src/ipa/libipa/fc_queue.cpp
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Google Inc.
+ *
+ * IPA Frame context queue
+ */
+
+#include "fc_queue.h"
+
+#include <libcamera/base/log.h>
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(FCQueue)
+
+namespace ipa {
+
+/**
+ * \file fc_queue.h
+ * \brief Queue of per-frame contexts
+ */
+
+/**
+ * \struct FrameContext
+ * \brief Context for a frame
+ *
+ * The frame context stores data specific to a single frame processed by the
+ * IPA module. Each frame processed by the IPA module has a context associated
+ * with it, accessible through the Frame Context Queue.
+ *
+ * Fields in the frame context should reflect values and controls associated
+ * with the specific frame as requested by the application, and as configured by
+ * the hardware. Fields can be read by algorithms to determine if they should
+ * update any specific action for this frame, and finally to update the metadata
+ * control lists when the frame is fully completed.
+ *
+ * \var FrameContext::frame
+ * \brief The frame number
+ */
+
+/**
+ * \class FCQueue
+ * \brief A support class for managing FrameContext instances in IPA modules
+ * \tparam FrameContext The IPA module-specific FrameContext derived class type
+ *
+ * Along with the Module and Algorithm classes, the frame context queue is a
+ * core component of the libipa infrastructure. It stores per-frame contexts
+ * used by the Algorithm operations. By centralizing the lifetime management of
+ * the contexts and implementing safeguards against underflows and overflows, it
+ * simplifies IPA modules and improves their reliability.
+ *
+ * The queue references frame contexts by a monotonically increasing sequence
+ * number. The FCQueue design assumes that this number matches both the sequence
+ * number of the corresponding frame, as generated by the camera sensor, and the
+ * sequence number of the request. This allows IPA modules to obtain the frame
+ * context from any location where a request or a frame is available.
+ *
+ * A frame context normally begins its lifetime when the corresponding request
+ * is queued, way before the frame is captured by the camera sensor. IPA modules
+ * allocate the context from the queue at that point, calling alloc() using the
+ * request number. The queue initializes the context, and the IPA module then
+ * populates it with data from the request. The context can be later retrieved
+ * with a call to get(), typically when the IPA module is requested to provide
+ * sensor or ISP parameters or receives statistics for a frame. The frame number
+ * is used at that point to identify the context.
+ *
+ * If an application fails to queue requests to the camera fast enough, frames
+ * may be produced by the camera sensor and processed by the IPA module without
+ * a corresponding request having been queued to the IPA module. This creates an
+ * underrun condition, where the IPA module will try to get a frame context that
+ * hasn't been allocated. In this case, the get() function will allocate and
+ * initialize a context for the frame, and log a message. Algorithms will not
+ * apply the controls associated with the late request, but should otherwise
+ * behave correctly.
+ *
+ * \todo Mark the frame context with a per-frame control error flag in case of
+ * underrun, and research how algorithms should handle this.
+ *
+ * At its core, the queue uses a circular buffer to avoid dynamic memory
+ * allocation at runtime. The buffer is pre-allocated with a maximum number of
+ * entries when the FCQueue instance is constructed. Entries are initialized on
+ * first use by alloc() or, in underrun conditions, get(). The queue is not
+ * allowed to overflow, which must be ensured by pipeline handlers never
+ * queuing more in-flight requests to the IPA module than the queue size. If an
+ * overflow condition is detected, the queue will log a fatal error.
+ *
+ * IPA module-specific frame context implementations shall inherit from the
+ * FrameContext base class to support the minimum required features for a
+ * FrameContext.
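+ *
+ * A minimal usage sketch (the derived context fields, queue depth and frame
+ * number below are hypothetical):
+ *
+ * \code
+ * struct IPAFrameContext : public FrameContext {
+ *	uint32_t exposure;
+ *	double gain;
+ * };
+ *
+ * FCQueue<IPAFrameContext> queue(4);
+ *
+ * queue.clear();
+ *
+ * IPAFrameContext &newCtx = queue.alloc(frame);
+ * newCtx.exposure = 1000;
+ *
+ * IPAFrameContext &ctx = queue.get(frame);
+ * \endcode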
+ */
+
+/**
+ * \fn FCQueue::FCQueue(unsigned int size)
+ * \brief Construct a frame contexts queue of a specified size
+ * \param[in] size The number of contexts in the queue
+ */
+
+/**
+ * \fn FCQueue::clear()
+ * \brief Clear the contexts queue
+ *
+ * IPA modules must clear the frame context queue at the beginning of a new
+ * streaming session, in IPAModule::start().
+ *
+ * \todo Fix any issue this may cause with requests queued before the camera is
+ * started.
+ */
+
+/**
+ * \fn FCQueue::alloc(uint32_t frame)
+ * \brief Allocate and return a FrameContext for the \a frame
+ * \param[in] frame The frame context sequence number
+ *
+ * The first call to obtain a FrameContext from the FCQueue should be handled
+ * through this function. The FrameContext will be initialised, if not
+ * initialised already, and returned to the caller.
+ *
+ * If the FrameContext was already initialised for this \a frame, a warning is
+ * reported and the previously initialised FrameContext is returned.
+ *
+ * Frame contexts are expected to be initialised when a Request is first passed
+ * to the IPA module in IPAModule::queueRequest().
+ *
+ * \return A reference to the FrameContext for sequence \a frame
+ */
+
+/**
+ * \fn FCQueue::get(uint32_t frame)
+ * \brief Obtain the FrameContext for the \a frame
+ * \param[in] frame The frame context sequence number
+ *
+ * If the FrameContext for \a frame has not yet been initialised, it will be
+ * initialised before being returned.
+ *
+ * \return A reference to the FrameContext for sequence \a frame
+ */
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/fc_queue.h b/src/ipa/libipa/fc_queue.h
new file mode 100644
index 00000000..24d9e82b
--- /dev/null
+++ b/src/ipa/libipa/fc_queue.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Google Inc.
+ *
+ * IPA Frame context queue
+ */
+
+#pragma once
+
+#include <stdint.h>
+#include <vector>
+
+#include <libcamera/base/log.h>
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(FCQueue)
+
+namespace ipa {
+
+template<typename FrameContext>
+class FCQueue;
+
+struct FrameContext {
+private:
+ template<typename T> friend class FCQueue;
+ uint32_t frame;
+};
+
+template<typename FrameContext>
+class FCQueue
+{
+public:
+ FCQueue(unsigned int size)
+ : contexts_(size)
+ {
+ }
+
+ void clear()
+ {
+ for (FrameContext &ctx : contexts_)
+ ctx.frame = 0;
+ }
+
+ FrameContext &alloc(const uint32_t frame)
+ {
+ FrameContext &frameContext = contexts_[frame % contexts_.size()];
+
+ /*
+		 * Do not re-initialise if a get() call has already fetched this
+		 * frame context, in order to preserve its contents.
+		 *
+		 * \todo If the sequence number of the context to initialise
+		 * is smaller than the sequence number of the queue slot to use,
+		 * it means that we had a serious request underrun and more
+		 * frames than the queue size have been produced since the last
+		 * time the application queued a request. Does this deserve
+		 * an error condition?
+ */
+ if (frame != 0 && frame <= frameContext.frame)
+ LOG(FCQueue, Warning)
+ << "Frame " << frame << " already initialised";
+ else
+ init(frameContext, frame);
+
+ return frameContext;
+ }
+
+ FrameContext &get(uint32_t frame)
+ {
+ FrameContext &frameContext = contexts_[frame % contexts_.size()];
+
+ /*
+		 * If the IPA algorithms try to access a frame context slot that
+		 * has already been overwritten by a newer context, it means the
+ * frame context queue has overflowed and the desired context
+ * has been forever lost. The pipeline handler shall avoid
+ * queueing more requests to the IPA than the frame context
+ * queue size.
+ */
+ if (frame < frameContext.frame)
+ LOG(FCQueue, Fatal) << "Frame context for " << frame
+ << " has been overwritten by "
+ << frameContext.frame;
+
+ if (frame == frameContext.frame)
+ return frameContext;
+
+ /*
+		 * The frame context has been retrieved before it was
+		 * initialised through an alloc() call. This indicates that an
+		 * algorithm attempted to access a frame context before the
+		 * corresponding request was queued to the IPA. Controls applied
+		 * for this request may be left unhandled.
+ *
+ * \todo Set an error flag for per-frame control errors.
+ */
+ LOG(FCQueue, Warning)
+ << "Obtained an uninitialised FrameContext for " << frame;
+
+ init(frameContext, frame);
+
+ return frameContext;
+ }
+
+private:
+ void init(FrameContext &frameContext, const uint32_t frame)
+ {
+ frameContext = {};
+ frameContext.frame = frame;
+ }
+
+ std::vector<FrameContext> contexts_;
+};
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/histogram.cpp b/src/ipa/libipa/histogram.cpp
index d8ad1c89..5fbfadf5 100644
--- a/src/ipa/libipa/histogram.cpp
+++ b/src/ipa/libipa/histogram.cpp
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019, Raspberry Pi Ltd
*
- * histogram.cpp - histogram calculations
+ * histogram calculations
*/
#include "histogram.h"
@@ -29,18 +29,34 @@ namespace ipa {
*/
/**
+ * \fn Histogram::Histogram()
+ * \brief Construct an empty Histogram
+ *
+ * This empty constructor exists largely to allow Histograms to be embedded in
+ * other classes which may be created before the contents of the Histogram are
+ * known.
+ */
+
+/**
* \brief Create a cumulative histogram
- * \param[in] data A pre-sorted histogram to be passed
+ * \param[in] data A (non-cumulative) histogram
*/
Histogram::Histogram(Span<const uint32_t> data)
{
- cumulative_.reserve(data.size());
- cumulative_.push_back(0);
- for (const uint32_t &value : data)
- cumulative_.push_back(cumulative_.back() + value);
+ cumulative_.resize(data.size() + 1);
+ cumulative_[0] = 0;
+ for (const auto &[i, value] : utils::enumerate(data))
+ cumulative_[i + 1] = cumulative_[i] + value;
}
/**
+ * \fn Histogram::Histogram(Span<const uint32_t> data, Transform transform)
+ * \brief Create a cumulative histogram
+ * \param[in] data A (non-cumulative) histogram
+ * \param[in] transform The transformation function to apply to every bin
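+ *
+ * For example, a histogram whose bin values should be divided by four before
+ * accumulation (a hypothetical weighting) could be built as:
+ *
+ * \code
+ * Histogram histogram(data, [](uint32_t value) { return value / 4; });
+ * \endcode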
+ */
+
+/**
* \fn Histogram::bins()
* \brief Retrieve the number of bins currently used by the Histogram
* \return Number of bins
@@ -53,7 +69,7 @@ Histogram::Histogram(Span<const uint32_t> data)
*/
/**
- * \brief Cumulative frequency up to a (fractional) point in a bin.
+ * \brief Cumulative frequency up to a (fractional) point in a bin
* \param[in] bin The bin up to which to cumulate
*
* With F(p) the cumulative frequency of the histogram, the value is 0 at
diff --git a/src/ipa/libipa/histogram.h b/src/ipa/libipa/histogram.h
index 164d4603..032adca0 100644
--- a/src/ipa/libipa/histogram.h
+++ b/src/ipa/libipa/histogram.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019, Raspberry Pi Ltd
*
- * histogram.h - histogram calculation interface
+ * histogram calculation interface
*/
#pragma once
@@ -10,10 +10,11 @@
#include <assert.h>
#include <limits.h>
#include <stdint.h>
-
+#include <type_traits>
#include <vector>
#include <libcamera/base/span.h>
+#include <libcamera/base/utils.h>
namespace libcamera {
@@ -22,7 +23,19 @@ namespace ipa {
class Histogram
{
public:
+ Histogram() { cumulative_.push_back(0); }
Histogram(Span<const uint32_t> data);
+
+ template<typename Transform,
+ std::enable_if_t<std::is_invocable_v<Transform, uint32_t>> * = nullptr>
+ Histogram(Span<const uint32_t> data, Transform transform)
+ {
+ cumulative_.resize(data.size() + 1);
+ cumulative_[0] = 0;
+ for (const auto &[i, value] : utils::enumerate(data))
+ cumulative_[i + 1] = cumulative_[i] + transform(value);
+ }
+
size_t bins() const { return cumulative_.size() - 1; }
uint64_t total() const { return cumulative_[cumulative_.size() - 1]; }
uint64_t cumulativeFrequency(double bin) const;
diff --git a/src/ipa/libipa/meson.build b/src/ipa/libipa/meson.build
index fb894bc6..7ce885da 100644
--- a/src/ipa/libipa/meson.build
+++ b/src/ipa/libipa/meson.build
@@ -1,15 +1,21 @@
# SPDX-License-Identifier: CC0-1.0
libipa_headers = files([
+ 'agc_mean_luminance.h',
'algorithm.h',
'camera_sensor_helper.h',
+ 'exposure_mode_helper.h',
+ 'fc_queue.h',
'histogram.h',
'module.h',
])
libipa_sources = files([
+ 'agc_mean_luminance.cpp',
'algorithm.cpp',
'camera_sensor_helper.cpp',
+ 'exposure_mode_helper.cpp',
+ 'fc_queue.cpp',
'histogram.cpp',
'module.cpp',
])
diff --git a/src/ipa/libipa/module.cpp b/src/ipa/libipa/module.cpp
index 77352104..64ca9141 100644
--- a/src/ipa/libipa/module.cpp
+++ b/src/ipa/libipa/module.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022, Ideas On Board
*
- * module.cpp - IPA Module
+ * IPA Module
*/
#include "module.h"
@@ -17,7 +17,7 @@ namespace libcamera {
LOG_DEFINE_CATEGORY(IPAModuleAlgo)
/**
- * \brief The IPA namespace
+ * \brief The IPA (Image Processing Algorithm) namespace
*
* The IPA namespace groups all types specific to IPA modules. It serves as the
* top-level namespace for the IPA library libipa, and also contains
diff --git a/src/ipa/libipa/module.h b/src/ipa/libipa/module.h
index 4149a353..0fb51916 100644
--- a/src/ipa/libipa/module.h
+++ b/src/ipa/libipa/module.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022, Ideas On Board
*
- * module.h - IPA module
+ * IPA module
*/
#pragma once
diff --git a/src/ipa/meson.build b/src/ipa/meson.build
index e15a8a06..0ad4631d 100644
--- a/src/ipa/meson.build
+++ b/src/ipa/meson.build
@@ -28,14 +28,45 @@ ipa_names = []
ipa_modules = get_option('ipas')
-# The ipa-sign-install.sh script which uses the ipa_names variable will itself
-# prepend MESON_INSTALL_DESTDIR_PREFIX to each ipa module name, therefore we
-# must not include the prefix string here.
+# Tests require the vimc IPA, similarly to the vimc pipeline handler, for
+# their execution. Include it automatically when tests are enabled.
+if get_option('test') and 'vimc' not in ipa_modules
+ message('Enabling vimc IPA to support tests')
+ ipa_modules += ['vimc']
+endif
+
+enabled_ipa_modules = []
+enabled_ipa_names = []
+ipa_names = []
+
+subdirs = []
foreach pipeline : pipelines
- if ipa_modules.contains(pipeline)
- subdir(pipeline)
- ipa_names += ipa_install_dir / ipa_name + '.so'
+ # The current implementation expects the IPA module name to match the
+ # pipeline name.
+ # \todo Make the IPA naming scheme more flexible.
+ if not ipa_modules.contains(pipeline)
+ continue
+ endif
+ enabled_ipa_names += pipeline
+
+ # Allow multi-level directory structuring for the IPAs if needed.
+ pipeline = pipeline.split('/')[0]
+ if pipeline in subdirs
+ continue
endif
+
+ subdirs += pipeline
+ subdir(pipeline)
+
+ # Don't reuse the pipeline variable below, the subdirectory may have
+ # overwritten it.
+endforeach
+
+# The ipa-sign-install.sh script which uses the enabled_ipa_modules variable
+# will itself prepend MESON_INSTALL_DESTDIR_PREFIX to each ipa module name,
+# therefore we must not include the prefix string here.
+foreach ipa_name : ipa_names
+ enabled_ipa_modules += ipa_install_dir / ipa_name + '.so'
endforeach
if ipa_sign_module
@@ -44,5 +75,6 @@ if ipa_sign_module
# install time, which invalidates the signatures.
meson.add_install_script('ipa-sign-install.sh',
ipa_priv_key.full_path(),
- ipa_names)
+ enabled_ipa_modules,
+ install_tag : 'runtime')
endif
diff --git a/src/ipa/raspberrypi/cam_helper.cpp b/src/ipa/raspberrypi/cam_helper.cpp
deleted file mode 100644
index 3f81d418..00000000
--- a/src/ipa/raspberrypi/cam_helper.cpp
+++ /dev/null
@@ -1,219 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * cam_helper.cpp - helper information for different sensors
- */
-
-#include <linux/videodev2.h>
-
-#include <assert.h>
-#include <map>
-#include <string.h>
-
-#include "libcamera/internal/v4l2_videodevice.h"
-
-#include "cam_helper.hpp"
-#include "md_parser.hpp"
-
-using namespace RPiController;
-using namespace libcamera;
-using libcamera::utils::Duration;
-
-namespace libcamera {
-LOG_DECLARE_CATEGORY(IPARPI)
-}
-
-static std::map<std::string, CamHelperCreateFunc> cam_helpers;
-
-CamHelper *CamHelper::Create(std::string const &cam_name)
-{
- /*
- * CamHelpers get registered by static RegisterCamHelper
- * initialisers.
- */
- for (auto &p : cam_helpers) {
- if (cam_name.find(p.first) != std::string::npos)
- return p.second();
- }
-
- return nullptr;
-}
-
-CamHelper::CamHelper(std::unique_ptr<MdParser> parser, unsigned int frameIntegrationDiff)
- : parser_(std::move(parser)), initialized_(false),
- frameIntegrationDiff_(frameIntegrationDiff)
-{
-}
-
-CamHelper::~CamHelper()
-{
-}
-
-void CamHelper::Prepare(Span<const uint8_t> buffer,
- Metadata &metadata)
-{
- parseEmbeddedData(buffer, metadata);
-}
-
-void CamHelper::Process([[maybe_unused]] StatisticsPtr &stats,
- [[maybe_unused]] Metadata &metadata)
-{
-}
-
-uint32_t CamHelper::ExposureLines(const Duration exposure) const
-{
- assert(initialized_);
- return exposure / mode_.line_length;
-}
-
-Duration CamHelper::Exposure(uint32_t exposure_lines) const
-{
- assert(initialized_);
- return exposure_lines * mode_.line_length;
-}
-
-uint32_t CamHelper::GetVBlanking(Duration &exposure,
- Duration minFrameDuration,
- Duration maxFrameDuration) const
-{
- uint32_t frameLengthMin, frameLengthMax, vblank;
- uint32_t exposureLines = ExposureLines(exposure);
-
- assert(initialized_);
-
- /*
- * minFrameDuration and maxFrameDuration are clamped by the caller
- * based on the limits for the active sensor mode.
- */
- frameLengthMin = minFrameDuration / mode_.line_length;
- frameLengthMax = maxFrameDuration / mode_.line_length;
-
- /*
- * Limit the exposure to the maximum frame duration requested, and
- * re-calculate if it has been clipped.
- */
- exposureLines = std::min(frameLengthMax - frameIntegrationDiff_, exposureLines);
- exposure = Exposure(exposureLines);
-
- /* Limit the vblank to the range allowed by the frame length limits. */
- vblank = std::clamp(exposureLines + frameIntegrationDiff_,
- frameLengthMin, frameLengthMax) - mode_.height;
- return vblank;
-}
-
-void CamHelper::SetCameraMode(const CameraMode &mode)
-{
- mode_ = mode;
- if (parser_) {
- parser_->SetBitsPerPixel(mode.bitdepth);
- parser_->SetLineLengthBytes(0); /* We use SetBufferSize. */
- }
- initialized_ = true;
-}
-
-void CamHelper::GetDelays(int &exposure_delay, int &gain_delay,
- int &vblank_delay) const
-{
- /*
- * These values are correct for many sensors. Other sensors will
- * need to over-ride this function.
- */
- exposure_delay = 2;
- gain_delay = 1;
- vblank_delay = 2;
-}
-
-bool CamHelper::SensorEmbeddedDataPresent() const
-{
- return false;
-}
-
-double CamHelper::GetModeSensitivity([[maybe_unused]] const CameraMode &mode) const
-{
- /*
- * Most sensors have the same sensitivity in every mode, but this
- * function can be overridden for those that do not. Note that it is
- * called before mode_ is set, so it must return the sensitivity
- * of the mode that is passed in.
- */
- return 1.0;
-}
-
-unsigned int CamHelper::HideFramesStartup() const
-{
- /*
- * The number of frames when a camera first starts that shouldn't be
- * displayed as they are invalid in some way.
- */
- return 0;
-}
-
-unsigned int CamHelper::HideFramesModeSwitch() const
-{
- /* After a mode switch, many sensors return valid frames immediately. */
- return 0;
-}
-
-unsigned int CamHelper::MistrustFramesStartup() const
-{
- /* Many sensors return a single bad frame on start-up. */
- return 1;
-}
-
-unsigned int CamHelper::MistrustFramesModeSwitch() const
-{
- /* Many sensors return valid metadata immediately. */
- return 0;
-}
-
-void CamHelper::parseEmbeddedData(Span<const uint8_t> buffer,
- Metadata &metadata)
-{
- MdParser::RegisterMap registers;
- Metadata parsedMetadata;
-
- if (buffer.empty())
- return;
-
- if (parser_->Parse(buffer, registers) != MdParser::Status::OK) {
- LOG(IPARPI, Error) << "Embedded data buffer parsing failed";
- return;
- }
-
- PopulateMetadata(registers, parsedMetadata);
- metadata.Merge(parsedMetadata);
-
- /*
- * Overwrite the exposure/gain, frame length and sensor temperature values
- * in the existing DeviceStatus with values from the parsed embedded buffer.
- * Fetch it first in case any other fields were set meaningfully.
- */
- DeviceStatus deviceStatus, parsedDeviceStatus;
- if (metadata.Get("device.status", deviceStatus) ||
- parsedMetadata.Get("device.status", parsedDeviceStatus)) {
- LOG(IPARPI, Error) << "DeviceStatus not found";
- return;
- }
-
- deviceStatus.shutter_speed = parsedDeviceStatus.shutter_speed;
- deviceStatus.analogue_gain = parsedDeviceStatus.analogue_gain;
- deviceStatus.frame_length = parsedDeviceStatus.frame_length;
- if (parsedDeviceStatus.sensor_temperature)
- deviceStatus.sensor_temperature = parsedDeviceStatus.sensor_temperature;
-
- LOG(IPARPI, Debug) << "Metadata updated - " << deviceStatus;
-
- metadata.Set("device.status", deviceStatus);
-}
-
-void CamHelper::PopulateMetadata([[maybe_unused]] const MdParser::RegisterMap &registers,
- [[maybe_unused]] Metadata &metadata) const
-{
-}
-
-RegisterCamHelper::RegisterCamHelper(char const *cam_name,
- CamHelperCreateFunc create_func)
-{
- cam_helpers[std::string(cam_name)] = create_func;
-}
diff --git a/src/ipa/raspberrypi/cam_helper.hpp b/src/ipa/raspberrypi/cam_helper.hpp
deleted file mode 100644
index 300f8f8a..00000000
--- a/src/ipa/raspberrypi/cam_helper.hpp
+++ /dev/null
@@ -1,123 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * cam_helper.hpp - helper class providing camera information
- */
-#pragma once
-
-#include <memory>
-#include <string>
-
-#include <libcamera/base/span.h>
-#include <libcamera/base/utils.h>
-
-#include "camera_mode.h"
-#include "controller/controller.hpp"
-#include "controller/metadata.hpp"
-#include "md_parser.hpp"
-
-#include "libcamera/internal/v4l2_videodevice.h"
-
-namespace RPiController {
-
-// The CamHelper class provides a number of facilities that anyone trying
-// to drive a camera will need to know, but which are not provided by the
-// standard driver framework. Specifically, it provides:
-//
-// A "CameraMode" structure to describe extra information about the chosen
-// mode of the driver. For example, how it is cropped from the full sensor
-// area, how it is scaled, whether pixels are averaged compared to the full
-// resolution.
-//
-// The ability to convert between number of lines of exposure and actual
-// exposure time, and to convert between the sensor's gain codes and actual
-// gains.
-//
-// A function to return the number of frames of delay between updating exposure,
-// analogue gain and vblanking, and for the changes to take effect. For many
-// sensors these take the values 2, 1 and 2 respectively, but sensors that are
-// different will need to over-ride the default function provided.
-//
-// A function to query if the sensor outputs embedded data that can be parsed.
-//
-// A function to return the sensitivity of a given camera mode.
-//
-// A parser to parse the embedded data buffers provided by some sensors (for
-// example, the imx219 does; the ov5647 doesn't). This allows us to know for
-// sure the exposure and gain of the frame we're looking at. CamHelper
-// provides functions for converting analogue gains to and from the sensor's
-// native gain codes.
-//
-// Finally, a set of functions that determine how to handle the vagaries of
-// different camera modules on start-up or when switching modes. Some
-// modules may produce one or more frames that are not yet correctly exposed,
-// or where the metadata may be suspect. We have the following functions:
-// HideFramesStartup(): Tell the pipeline handler not to return this many
-// frames at start-up. This can also be used to hide initial frames
-// while the AGC and other algorithms are sorting themselves out.
-// HideFramesModeSwitch(): Tell the pipeline handler not to return this
-// many frames after a mode switch (other than start-up). Some sensors
-// may produce innvalid frames after a mode switch; others may not.
-// MistrustFramesStartup(): At start-up a sensor may return frames for
-// which we should not run any control algorithms (for example, metadata
-// may be invalid).
-// MistrustFramesModeSwitch(): The number of frames, after a mode switch
-// (other than start-up), for which control algorithms should not run
-// (for example, metadata may be unreliable).
-
-class CamHelper
-{
-public:
- static CamHelper *Create(std::string const &cam_name);
- CamHelper(std::unique_ptr<MdParser> parser, unsigned int frameIntegrationDiff);
- virtual ~CamHelper();
- void SetCameraMode(const CameraMode &mode);
- virtual void Prepare(libcamera::Span<const uint8_t> buffer,
- Metadata &metadata);
- virtual void Process(StatisticsPtr &stats, Metadata &metadata);
- virtual uint32_t ExposureLines(libcamera::utils::Duration exposure) const;
- virtual libcamera::utils::Duration Exposure(uint32_t exposure_lines) const;
- virtual uint32_t GetVBlanking(libcamera::utils::Duration &exposure,
- libcamera::utils::Duration minFrameDuration,
- libcamera::utils::Duration maxFrameDuration) const;
- virtual uint32_t GainCode(double gain) const = 0;
- virtual double Gain(uint32_t gain_code) const = 0;
- virtual void GetDelays(int &exposure_delay, int &gain_delay,
- int &vblank_delay) const;
- virtual bool SensorEmbeddedDataPresent() const;
- virtual double GetModeSensitivity(const CameraMode &mode) const;
- virtual unsigned int HideFramesStartup() const;
- virtual unsigned int HideFramesModeSwitch() const;
- virtual unsigned int MistrustFramesStartup() const;
- virtual unsigned int MistrustFramesModeSwitch() const;
-
-protected:
- void parseEmbeddedData(libcamera::Span<const uint8_t> buffer,
- Metadata &metadata);
- virtual void PopulateMetadata(const MdParser::RegisterMap &registers,
- Metadata &metadata) const;
-
- std::unique_ptr<MdParser> parser_;
- CameraMode mode_;
-
-private:
- bool initialized_;
- /*
- * Smallest difference between the frame length and integration time,
- * in units of lines.
- */
- unsigned int frameIntegrationDiff_;
-};
-
-// This is for registering camera helpers with the system, so that the
-// CamHelper::Create function picks them up automatically.
-
-typedef CamHelper *(*CamHelperCreateFunc)();
-struct RegisterCamHelper
-{
- RegisterCamHelper(char const *cam_name,
- CamHelperCreateFunc create_func);
-};
-
-} // namespace RPi
diff --git a/src/ipa/raspberrypi/cam_helper_imx290.cpp b/src/ipa/raspberrypi/cam_helper_imx290.cpp
deleted file mode 100644
index 871c1f8e..00000000
--- a/src/ipa/raspberrypi/cam_helper_imx290.cpp
+++ /dev/null
@@ -1,67 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2021, Raspberry Pi (Trading) Limited
- *
- * cam_helper_imx290.cpp - camera helper for imx290 sensor
- */
-
-#include <math.h>
-
-#include "cam_helper.hpp"
-
-using namespace RPiController;
-
-class CamHelperImx290 : public CamHelper
-{
-public:
- CamHelperImx290();
- uint32_t GainCode(double gain) const override;
- double Gain(uint32_t gain_code) const override;
- void GetDelays(int &exposure_delay, int &gain_delay,
- int &vblank_delay) const override;
- unsigned int HideFramesModeSwitch() const override;
-
-private:
- /*
- * Smallest difference between the frame length and integration time,
- * in units of lines.
- */
- static constexpr int frameIntegrationDiff = 2;
-};
-
-CamHelperImx290::CamHelperImx290()
- : CamHelper({}, frameIntegrationDiff)
-{
-}
-
-uint32_t CamHelperImx290::GainCode(double gain) const
-{
- int code = 66.6667 * log10(gain);
- return std::max(0, std::min(code, 0xf0));
-}
-
-double CamHelperImx290::Gain(uint32_t gain_code) const
-{
- return pow(10, 0.015 * gain_code);
-}
-
-void CamHelperImx290::GetDelays(int &exposure_delay, int &gain_delay,
- int &vblank_delay) const
-{
- exposure_delay = 2;
- gain_delay = 2;
- vblank_delay = 2;
-}
-
-unsigned int CamHelperImx290::HideFramesModeSwitch() const
-{
- /* After a mode switch, we seem to get 1 bad frame. */
- return 1;
-}
-
-static CamHelper *Create()
-{
- return new CamHelperImx290();
-}
-
-static RegisterCamHelper reg("imx290", &Create);
diff --git a/src/ipa/raspberrypi/cam_helper_imx296.cpp b/src/ipa/raspberrypi/cam_helper_imx296.cpp
deleted file mode 100644
index a1a771cb..00000000
--- a/src/ipa/raspberrypi/cam_helper_imx296.cpp
+++ /dev/null
@@ -1,69 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Limited
- *
- * cam_helper_imx296.cpp - Camera helper for IMX296 sensor
- */
-
-#include <algorithm>
-#include <cmath>
-#include <stddef.h>
-
-#include "cam_helper.hpp"
-
-using namespace RPiController;
-using libcamera::utils::Duration;
-using namespace std::literals::chrono_literals;
-
-class CamHelperImx296 : public CamHelper
-{
-public:
- CamHelperImx296();
- uint32_t GainCode(double gain) const override;
- double Gain(uint32_t gain_code) const override;
- uint32_t ExposureLines(Duration exposure) const override;
- Duration Exposure(uint32_t exposure_lines) const override;
-
-private:
- static constexpr uint32_t maxGainCode = 239;
- static constexpr Duration timePerLine = 550.0 / 37.125e6 * 1.0s;
-
- /*
- * Smallest difference between the frame length and integration time,
- * in units of lines.
- */
- static constexpr int frameIntegrationDiff = 4;
-};
-
-CamHelperImx296::CamHelperImx296()
- : CamHelper(nullptr, frameIntegrationDiff)
-{
-}
-
-uint32_t CamHelperImx296::GainCode(double gain) const
-{
- uint32_t code = 20 * std::log10(gain) * 10;
- return std::min(code, maxGainCode);
-}
-
-double CamHelperImx296::Gain(uint32_t gain_code) const
-{
- return std::pow(10.0, gain_code / 200.0);
-}
-
-uint32_t CamHelperImx296::ExposureLines(Duration exposure) const
-{
- return (exposure - 14.26us) / timePerLine;
-}
-
-Duration CamHelperImx296::Exposure(uint32_t exposure_lines) const
-{
- return exposure_lines * timePerLine + 14.26us;
-}
-
-static CamHelper *Create()
-{
- return new CamHelperImx296();
-}
-
-static RegisterCamHelper reg("imx296", &Create);
diff --git a/src/ipa/raspberrypi/cam_helper_ov9281.cpp b/src/ipa/raspberrypi/cam_helper_ov9281.cpp
deleted file mode 100644
index 9de868c3..00000000
--- a/src/ipa/raspberrypi/cam_helper_ov9281.cpp
+++ /dev/null
@@ -1,65 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2021, Raspberry Pi (Trading) Limited
- *
- * cam_helper_ov9281.cpp - camera information for ov9281 sensor
- */
-
-#include <assert.h>
-
-#include "cam_helper.hpp"
-
-using namespace RPiController;
-
-class CamHelperOv9281 : public CamHelper
-{
-public:
- CamHelperOv9281();
- uint32_t GainCode(double gain) const override;
- double Gain(uint32_t gain_code) const override;
- void GetDelays(int &exposure_delay, int &gain_delay,
- int &vblank_delay) const override;
-
-private:
- /*
- * Smallest difference between the frame length and integration time,
- * in units of lines.
- */
- static constexpr int frameIntegrationDiff = 4;
-};
-
-/*
- * OV9281 doesn't output metadata, so we have to use the "unicam parser" which
- * works by counting frames.
- */
-
-CamHelperOv9281::CamHelperOv9281()
- : CamHelper({}, frameIntegrationDiff)
-{
-}
-
-uint32_t CamHelperOv9281::GainCode(double gain) const
-{
- return static_cast<uint32_t>(gain * 16.0);
-}
-
-double CamHelperOv9281::Gain(uint32_t gain_code) const
-{
- return static_cast<double>(gain_code) / 16.0;
-}
-
-void CamHelperOv9281::GetDelays(int &exposure_delay, int &gain_delay,
- int &vblank_delay) const
-{
- /* The driver appears to behave as follows: */
- exposure_delay = 2;
- gain_delay = 2;
- vblank_delay = 2;
-}
-
-static CamHelper *Create()
-{
- return new CamHelperOv9281();
-}
-
-static RegisterCamHelper reg("ov9281", &Create);
diff --git a/src/ipa/raspberrypi/controller/agc_algorithm.hpp b/src/ipa/raspberrypi/controller/agc_algorithm.hpp
deleted file mode 100644
index 61595ea2..00000000
--- a/src/ipa/raspberrypi/controller/agc_algorithm.hpp
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * agc_algorithm.hpp - AGC/AEC control algorithm interface
- */
-#pragma once
-
-#include <libcamera/base/utils.h>
-
-#include "algorithm.hpp"
-
-namespace RPiController {
-
-class AgcAlgorithm : public Algorithm
-{
-public:
- AgcAlgorithm(Controller *controller) : Algorithm(controller) {}
- // An AGC algorithm must provide the following:
- virtual unsigned int GetConvergenceFrames() const = 0;
- virtual void SetEv(double ev) = 0;
- virtual void SetFlickerPeriod(libcamera::utils::Duration flicker_period) = 0;
- virtual void SetFixedShutter(libcamera::utils::Duration fixed_shutter) = 0;
- virtual void SetMaxShutter(libcamera::utils::Duration max_shutter) = 0;
- virtual void SetFixedAnalogueGain(double fixed_analogue_gain) = 0;
- virtual void SetMeteringMode(std::string const &metering_mode_name) = 0;
- virtual void SetExposureMode(std::string const &exposure_mode_name) = 0;
- virtual void
- SetConstraintMode(std::string const &contraint_mode_name) = 0;
-};
-
-} // namespace RPiController
diff --git a/src/ipa/raspberrypi/controller/agc_status.h b/src/ipa/raspberrypi/controller/agc_status.h
deleted file mode 100644
index 20cb1b62..00000000
--- a/src/ipa/raspberrypi/controller/agc_status.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * agc_status.h - AGC/AEC control algorithm status
- */
-#pragma once
-
-#include <libcamera/base/utils.h>
-
-// The AGC algorithm should post the following structure into the image's
-// "agc.status" metadata.
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-// Note: total_exposure_value will be reported as zero until the algorithm has
-// seen statistics and calculated meaningful values. The contents should be
-// ignored until then.
-
-struct AgcStatus {
- libcamera::utils::Duration total_exposure_value; // value for all exposure and gain for this image
- libcamera::utils::Duration target_exposure_value; // (unfiltered) target total exposure AGC is aiming for
- libcamera::utils::Duration shutter_time;
- double analogue_gain;
- char exposure_mode[32];
- char constraint_mode[32];
- char metering_mode[32];
- double ev;
- libcamera::utils::Duration flicker_period;
- int floating_region_enable;
- libcamera::utils::Duration fixed_shutter;
- double fixed_analogue_gain;
- double digital_gain;
- int locked;
-};
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/src/ipa/raspberrypi/controller/algorithm.cpp b/src/ipa/raspberrypi/controller/algorithm.cpp
deleted file mode 100644
index 43ad0a2b..00000000
--- a/src/ipa/raspberrypi/controller/algorithm.cpp
+++ /dev/null
@@ -1,44 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * algorithm.cpp - ISP control algorithms
- */
-
-#include "algorithm.hpp"
-
-using namespace RPiController;
-
-void Algorithm::Read([[maybe_unused]] boost::property_tree::ptree const &params)
-{
-}
-
-void Algorithm::Initialise() {}
-
-void Algorithm::SwitchMode([[maybe_unused]] CameraMode const &camera_mode,
- [[maybe_unused]] Metadata *metadata)
-{
-}
-
-void Algorithm::Prepare([[maybe_unused]] Metadata *image_metadata)
-{
-}
-
-void Algorithm::Process([[maybe_unused]] StatisticsPtr &stats,
- [[maybe_unused]] Metadata *image_metadata)
-{
-}
-
-// For registering algorithms with the system:
-
-static std::map<std::string, AlgoCreateFunc> algorithms;
-std::map<std::string, AlgoCreateFunc> const &RPiController::GetAlgorithms()
-{
- return algorithms;
-}
-
-RegisterAlgorithm::RegisterAlgorithm(char const *name,
- AlgoCreateFunc create_func)
-{
- algorithms[std::string(name)] = create_func;
-}
diff --git a/src/ipa/raspberrypi/controller/algorithm.hpp b/src/ipa/raspberrypi/controller/algorithm.hpp
deleted file mode 100644
index 5123c87b..00000000
--- a/src/ipa/raspberrypi/controller/algorithm.hpp
+++ /dev/null
@@ -1,60 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * algorithm.hpp - ISP control algorithm interface
- */
-#pragma once
-
-// All algorithms should be derived from this class and made available to the
-// Controller.
-
-#include <string>
-#include <memory>
-#include <map>
-
-#include "controller.hpp"
-
-#include <boost/property_tree/ptree.hpp>
-
-namespace RPiController {
-
-// This defines the basic interface for all control algorithms.
-
-class Algorithm
-{
-public:
- Algorithm(Controller *controller)
- : controller_(controller), paused_(false)
- {
- }
- virtual ~Algorithm() = default;
- virtual char const *Name() const = 0;
- virtual bool IsPaused() const { return paused_; }
- virtual void Pause() { paused_ = true; }
- virtual void Resume() { paused_ = false; }
- virtual void Read(boost::property_tree::ptree const &params);
- virtual void Initialise();
- virtual void SwitchMode(CameraMode const &camera_mode, Metadata *metadata);
- virtual void Prepare(Metadata *image_metadata);
- virtual void Process(StatisticsPtr &stats, Metadata *image_metadata);
- Metadata &GetGlobalMetadata() const
- {
- return controller_->GetGlobalMetadata();
- }
-
-private:
- Controller *controller_;
- bool paused_;
-};
-
-// This code is for automatic registration of Front End algorithms with the
-// system.
-
-typedef Algorithm *(*AlgoCreateFunc)(Controller *controller);
-struct RegisterAlgorithm {
- RegisterAlgorithm(char const *name, AlgoCreateFunc create_func);
-};
-std::map<std::string, AlgoCreateFunc> const &GetAlgorithms();
-
-} // namespace RPiController
diff --git a/src/ipa/raspberrypi/controller/alsc_status.h b/src/ipa/raspberrypi/controller/alsc_status.h
deleted file mode 100644
index d3f57971..00000000
--- a/src/ipa/raspberrypi/controller/alsc_status.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * alsc_status.h - ALSC (auto lens shading correction) control algorithm status
- */
-#pragma once
-
-// The ALSC algorithm should post the following structure into the image's
-// "alsc.status" metadata.
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define ALSC_CELLS_X 16
-#define ALSC_CELLS_Y 12
-
-struct AlscStatus {
- double r[ALSC_CELLS_Y][ALSC_CELLS_X];
- double g[ALSC_CELLS_Y][ALSC_CELLS_X];
- double b[ALSC_CELLS_Y][ALSC_CELLS_X];
-};
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/src/ipa/raspberrypi/controller/awb_algorithm.hpp b/src/ipa/raspberrypi/controller/awb_algorithm.hpp
deleted file mode 100644
index 96f88afc..00000000
--- a/src/ipa/raspberrypi/controller/awb_algorithm.hpp
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * awb_algorithm.hpp - AWB control algorithm interface
- */
-#pragma once
-
-#include "algorithm.hpp"
-
-namespace RPiController {
-
-class AwbAlgorithm : public Algorithm
-{
-public:
- AwbAlgorithm(Controller *controller) : Algorithm(controller) {}
- // An AWB algorithm must provide the following:
- virtual unsigned int GetConvergenceFrames() const = 0;
- virtual void SetMode(std::string const &mode_name) = 0;
- virtual void SetManualGains(double manual_r, double manual_b) = 0;
-};
-
-} // namespace RPiController
diff --git a/src/ipa/raspberrypi/controller/awb_status.h b/src/ipa/raspberrypi/controller/awb_status.h
deleted file mode 100644
index 46d7c842..00000000
--- a/src/ipa/raspberrypi/controller/awb_status.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * awb_status.h - AWB control algorithm status
- */
-#pragma once
-
-// The AWB algorithm places its results into both the image and global metadata,
-// under the tag "awb.status".
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-struct AwbStatus {
- char mode[32];
- double temperature_K;
- double gain_r;
- double gain_g;
- double gain_b;
-};
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/src/ipa/raspberrypi/controller/black_level_status.h b/src/ipa/raspberrypi/controller/black_level_status.h
deleted file mode 100644
index d085f64b..00000000
--- a/src/ipa/raspberrypi/controller/black_level_status.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * black_level_status.h - black level control algorithm status
- */
-#pragma once
-
-// The "black level" algorithm stores the black levels to use.
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-struct BlackLevelStatus {
- uint16_t black_level_r; // out of 16 bits
- uint16_t black_level_g;
- uint16_t black_level_b;
-};
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/src/ipa/raspberrypi/controller/camera_mode.h b/src/ipa/raspberrypi/controller/camera_mode.h
deleted file mode 100644
index e2b82828..00000000
--- a/src/ipa/raspberrypi/controller/camera_mode.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019-2020, Raspberry Pi (Trading) Limited
- *
- * camera_mode.h - description of a particular operating mode of a sensor
- */
-#pragma once
-
-#include <libcamera/transform.h>
-
-#include <libcamera/base/utils.h>
-
-// Description of a "camera mode", holding enough information for control
-// algorithms to adapt their behaviour to the different modes of the camera,
-// including binning, scaling, cropping etc.
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define CAMERA_MODE_NAME_LEN 32
-
-struct CameraMode {
- // bit depth of the raw camera output
- uint32_t bitdepth;
- // size in pixels of frames in this mode
- uint16_t width, height;
- // size of full resolution uncropped frame ("sensor frame")
- uint16_t sensor_width, sensor_height;
- // binning factor (1 = no binning, 2 = 2-pixel binning etc.)
- uint8_t bin_x, bin_y;
- // location of top left pixel in the sensor frame
- uint16_t crop_x, crop_y;
- // scaling factor (so if uncropped, width*scale_x is sensor_width)
- double scale_x, scale_y;
- // scaling of the noise compared to the native sensor mode
- double noise_factor;
- // line time
- libcamera::utils::Duration line_length;
- // any camera transform *not* reflected already in the camera tuning
- libcamera::Transform transform;
- // minimum and maximum fame lengths in units of lines
- uint32_t min_frame_length, max_frame_length;
- // sensitivity of this mode
- double sensitivity;
-};
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/src/ipa/raspberrypi/controller/ccm_algorithm.hpp b/src/ipa/raspberrypi/controller/ccm_algorithm.hpp
deleted file mode 100644
index 33d0e30d..00000000
--- a/src/ipa/raspberrypi/controller/ccm_algorithm.hpp
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * ccm_algorithm.hpp - CCM (colour correction matrix) control algorithm interface
- */
-#pragma once
-
-#include "algorithm.hpp"
-
-namespace RPiController {
-
-class CcmAlgorithm : public Algorithm
-{
-public:
- CcmAlgorithm(Controller *controller) : Algorithm(controller) {}
- // A CCM algorithm must provide the following:
- virtual void SetSaturation(double saturation) = 0;
-};
-
-} // namespace RPiController
diff --git a/src/ipa/raspberrypi/controller/ccm_status.h b/src/ipa/raspberrypi/controller/ccm_status.h
deleted file mode 100644
index 7e41dd1f..00000000
--- a/src/ipa/raspberrypi/controller/ccm_status.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * ccm_status.h - CCM (colour correction matrix) control algorithm status
- */
-#pragma once
-
-// The "ccm" algorithm generates an appropriate colour matrix.
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-struct CcmStatus {
- double matrix[9];
- double saturation;
-};
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/src/ipa/raspberrypi/controller/contrast_algorithm.hpp b/src/ipa/raspberrypi/controller/contrast_algorithm.hpp
deleted file mode 100644
index 7f03bba5..00000000
--- a/src/ipa/raspberrypi/controller/contrast_algorithm.hpp
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * contrast_algorithm.hpp - contrast (gamma) control algorithm interface
- */
-#pragma once
-
-#include "algorithm.hpp"
-
-namespace RPiController {
-
-class ContrastAlgorithm : public Algorithm
-{
-public:
- ContrastAlgorithm(Controller *controller) : Algorithm(controller) {}
- // A contrast algorithm must provide the following:
- virtual void SetBrightness(double brightness) = 0;
- virtual void SetContrast(double contrast) = 0;
-};
-
-} // namespace RPiController
diff --git a/src/ipa/raspberrypi/controller/contrast_status.h b/src/ipa/raspberrypi/controller/contrast_status.h
deleted file mode 100644
index d7edd4e9..00000000
--- a/src/ipa/raspberrypi/controller/contrast_status.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * contrast_status.h - contrast (gamma) control algorithm status
- */
-#pragma once
-
-// The "contrast" algorithm creates a gamma curve, optionally doing a little bit
-// of contrast stretching based on the AGC histogram.
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define CONTRAST_NUM_POINTS 33
-
-struct ContrastPoint {
- uint16_t x;
- uint16_t y;
-};
-
-struct ContrastStatus {
- struct ContrastPoint points[CONTRAST_NUM_POINTS];
- double brightness;
- double contrast;
-};
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/src/ipa/raspberrypi/controller/controller.cpp b/src/ipa/raspberrypi/controller/controller.cpp
deleted file mode 100644
index d3433ad2..00000000
--- a/src/ipa/raspberrypi/controller/controller.cpp
+++ /dev/null
@@ -1,104 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * controller.cpp - ISP controller
- */
-
-#include <libcamera/base/log.h>
-
-#include "algorithm.hpp"
-#include "controller.hpp"
-
-#include <boost/property_tree/json_parser.hpp>
-#include <boost/property_tree/ptree.hpp>
-
-using namespace RPiController;
-using namespace libcamera;
-
-LOG_DEFINE_CATEGORY(RPiController)
-
-Controller::Controller()
- : switch_mode_called_(false) {}
-
-Controller::Controller(char const *json_filename)
- : switch_mode_called_(false)
-{
- Read(json_filename);
- Initialise();
-}
-
-Controller::~Controller() {}
-
-void Controller::Read(char const *filename)
-{
- boost::property_tree::ptree root;
- boost::property_tree::read_json(filename, root);
- for (auto const &key_and_value : root) {
- Algorithm *algo = CreateAlgorithm(key_and_value.first.c_str());
- if (algo) {
- algo->Read(key_and_value.second);
- algorithms_.push_back(AlgorithmPtr(algo));
- } else
- LOG(RPiController, Warning)
- << "No algorithm found for \"" << key_and_value.first << "\"";
- }
-}
-
-Algorithm *Controller::CreateAlgorithm(char const *name)
-{
- auto it = GetAlgorithms().find(std::string(name));
- return it != GetAlgorithms().end() ? (*it->second)(this) : nullptr;
-}
-
-void Controller::Initialise()
-{
- for (auto &algo : algorithms_)
- algo->Initialise();
-}
-
-void Controller::SwitchMode(CameraMode const &camera_mode, Metadata *metadata)
-{
- for (auto &algo : algorithms_)
- algo->SwitchMode(camera_mode, metadata);
- switch_mode_called_ = true;
-}
-
-void Controller::Prepare(Metadata *image_metadata)
-{
- assert(switch_mode_called_);
- for (auto &algo : algorithms_)
- if (!algo->IsPaused())
- algo->Prepare(image_metadata);
-}
-
-void Controller::Process(StatisticsPtr stats, Metadata *image_metadata)
-{
- assert(switch_mode_called_);
- for (auto &algo : algorithms_)
- if (!algo->IsPaused())
- algo->Process(stats, image_metadata);
-}
-
-Metadata &Controller::GetGlobalMetadata()
-{
- return global_metadata_;
-}
-
-Algorithm *Controller::GetAlgorithm(std::string const &name) const
-{
- // The passed name must be the entire algorithm name, or must match the
- // last part of it with a period (.) just before.
- size_t name_len = name.length();
- for (auto &algo : algorithms_) {
- char const *algo_name = algo->Name();
- size_t algo_name_len = strlen(algo_name);
- if (algo_name_len >= name_len &&
- strcasecmp(name.c_str(),
- algo_name + algo_name_len - name_len) == 0 &&
- (name_len == algo_name_len ||
- algo_name[algo_name_len - name_len - 1] == '.'))
- return algo.get();
- }
- return nullptr;
-}
diff --git a/src/ipa/raspberrypi/controller/controller.hpp b/src/ipa/raspberrypi/controller/controller.hpp
deleted file mode 100644
index 3b50ae77..00000000
--- a/src/ipa/raspberrypi/controller/controller.hpp
+++ /dev/null
@@ -1,54 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * controller.hpp - ISP controller interface
- */
-#pragma once
-
-// The Controller is simply a container for a collecting together a number of
-// "control algorithms" (such as AWB etc.) and for running them all in a
-// convenient manner.
-
-#include <vector>
-#include <string>
-
-#include <linux/bcm2835-isp.h>
-
-#include "camera_mode.h"
-#include "device_status.h"
-#include "metadata.hpp"
-
-namespace RPiController {
-
-class Algorithm;
-typedef std::unique_ptr<Algorithm> AlgorithmPtr;
-typedef std::shared_ptr<bcm2835_isp_stats> StatisticsPtr;
-
-// The Controller holds a pointer to some global_metadata, which is how
-// different controllers and control algorithms within them can exchange
-// information. The Prepare function returns a pointer to metadata for this
-// specific image, and which should be passed on to the Process function.
-
-class Controller
-{
-public:
- Controller();
- Controller(char const *json_filename);
- ~Controller();
- Algorithm *CreateAlgorithm(char const *name);
- void Read(char const *filename);
- void Initialise();
- void SwitchMode(CameraMode const &camera_mode, Metadata *metadata);
- void Prepare(Metadata *image_metadata);
- void Process(StatisticsPtr stats, Metadata *image_metadata);
- Metadata &GetGlobalMetadata();
- Algorithm *GetAlgorithm(std::string const &name) const;
-
-protected:
- Metadata global_metadata_;
- std::vector<AlgorithmPtr> algorithms_;
- bool switch_mode_called_;
-};
-
-} // namespace RPiController
diff --git a/src/ipa/raspberrypi/controller/denoise_algorithm.hpp b/src/ipa/raspberrypi/controller/denoise_algorithm.hpp
deleted file mode 100644
index 39fcd7e9..00000000
--- a/src/ipa/raspberrypi/controller/denoise_algorithm.hpp
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2021, Raspberry Pi (Trading) Limited
- *
- * denoise.hpp - Denoise control algorithm interface
- */
-#pragma once
-
-#include "algorithm.hpp"
-
-namespace RPiController {
-
-enum class DenoiseMode { Off, ColourOff, ColourFast, ColourHighQuality };
-
-class DenoiseAlgorithm : public Algorithm
-{
-public:
- DenoiseAlgorithm(Controller *controller) : Algorithm(controller) {}
- // A Denoise algorithm must provide the following:
- virtual void SetMode(DenoiseMode mode) = 0;
-};
-
-} // namespace RPiController
diff --git a/src/ipa/raspberrypi/controller/denoise_status.h b/src/ipa/raspberrypi/controller/denoise_status.h
deleted file mode 100644
index 67a3c361..00000000
--- a/src/ipa/raspberrypi/controller/denoise_status.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019-2021, Raspberry Pi (Trading) Limited
- *
- * denoise_status.h - Denoise control algorithm status
- */
-#pragma once
-
-// This stores the parameters required for Denoise.
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-struct DenoiseStatus {
- double noise_constant;
- double noise_slope;
- double strength;
- unsigned int mode;
-};
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/src/ipa/raspberrypi/controller/device_status.cpp b/src/ipa/raspberrypi/controller/device_status.cpp
deleted file mode 100644
index a389c40d..00000000
--- a/src/ipa/raspberrypi/controller/device_status.cpp
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2021, Raspberry Pi (Trading) Limited
- *
- * device_status.cpp - device (image sensor) status
- */
-#include "device_status.h"
-
-using namespace libcamera; /* for the Duration operator<< overload */
-
-std::ostream &operator<<(std::ostream &out, const DeviceStatus &d)
-{
- out << "Exposure: " << d.shutter_speed
- << " Frame length: " << d.frame_length
- << " Gain: " << d.analogue_gain;
-
- if (d.aperture)
- out << " Aperture: " << *d.aperture;
-
- if (d.lens_position)
- out << " Lens: " << *d.lens_position;
-
- if (d.flash_intensity)
- out << " Flash: " << *d.flash_intensity;
-
- if (d.sensor_temperature)
- out << " Temperature: " << *d.sensor_temperature;
-
- return out;
-}
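
A small usage illustration for the stream operator above; the field types are assumed from how they are printed, and the values are made up:

    #include <iostream>

    #include "device_status.h"

    using namespace std::literals::chrono_literals;

    void logStatus()
    {
        DeviceStatus status;
        status.shutter_speed = 10ms; // assumed to be a libcamera::utils::Duration
        status.frame_length = 1600;
        status.analogue_gain = 2.0;
        // Prints exposure, frame length and gain, plus any of the optional
        // fields (aperture, lens position, ...) that happen to be set.
        std::cout << status << std::endl;
    }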
diff --git a/src/ipa/raspberrypi/controller/dpc_status.h b/src/ipa/raspberrypi/controller/dpc_status.h
deleted file mode 100644
index a3ec2762..00000000
--- a/src/ipa/raspberrypi/controller/dpc_status.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * dpc_status.h - DPC (defective pixel correction) control algorithm status
- */
-#pragma once
-
-// The "DPC" algorithm sets defective pixel correction strength.
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-struct DpcStatus {
- int strength; // 0 = "off", 1 = "normal", 2 = "strong"
-};
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/src/ipa/raspberrypi/controller/focus_status.h b/src/ipa/raspberrypi/controller/focus_status.h
deleted file mode 100644
index ace2fe2c..00000000
--- a/src/ipa/raspberrypi/controller/focus_status.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Limited
- *
- * focus_status.h - focus measurement status
- */
-#pragma once
-
-#include <linux/bcm2835-isp.h>
-
-// The focus algorithm should post the following structure into the image's
-// "focus.status" metadata. Recall that it's only reporting focus (contrast)
-// measurements; it's not driving any kind of auto-focus algorithm!
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-struct FocusStatus {
- unsigned int num;
- uint32_t focus_measures[FOCUS_REGIONS];
-};
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/src/ipa/raspberrypi/controller/geq_status.h b/src/ipa/raspberrypi/controller/geq_status.h
deleted file mode 100644
index 07fd5f03..00000000
--- a/src/ipa/raspberrypi/controller/geq_status.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * geq_status.h - GEQ (green equalisation) control algorithm status
- */
-#pragma once
-
-// The "GEQ" algorithm calculates the green equalisation thresholds
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-struct GeqStatus {
- uint16_t offset;
- double slope;
-};
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/src/ipa/raspberrypi/controller/histogram.cpp b/src/ipa/raspberrypi/controller/histogram.cpp
deleted file mode 100644
index 9916b3ed..00000000
--- a/src/ipa/raspberrypi/controller/histogram.cpp
+++ /dev/null
@@ -1,64 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * histogram.cpp - histogram calculations
- */
-#include <math.h>
-#include <stdio.h>
-
-#include "histogram.hpp"
-
-using namespace RPiController;
-
-uint64_t Histogram::CumulativeFreq(double bin) const
-{
- if (bin <= 0)
- return 0;
- else if (bin >= Bins())
- return Total();
- int b = (int)bin;
- return cumulative_[b] +
- (bin - b) * (cumulative_[b + 1] - cumulative_[b]);
-}
-
-double Histogram::Quantile(double q, int first, int last) const
-{
- if (first == -1)
- first = 0;
- if (last == -1)
- last = cumulative_.size() - 2;
- assert(first <= last);
- uint64_t items = q * Total();
- while (first < last) // binary search to find the right bin
- {
- int middle = (first + last) / 2;
- if (cumulative_[middle + 1] > items)
- last = middle; // between first and middle
- else
- first = middle + 1; // after middle
- }
- assert(items >= cumulative_[first] && items <= cumulative_[last + 1]);
- double frac = cumulative_[first + 1] == cumulative_[first] ? 0
- : (double)(items - cumulative_[first]) /
- (cumulative_[first + 1] - cumulative_[first]);
- return first + frac;
-}
-
-double Histogram::InterQuantileMean(double q_lo, double q_hi) const
-{
- assert(q_hi > q_lo);
- double p_lo = Quantile(q_lo);
- double p_hi = Quantile(q_hi, (int)p_lo);
- double sum_bin_freq = 0, cumul_freq = 0;
- for (double p_next = floor(p_lo) + 1.0; p_next <= ceil(p_hi);
- p_lo = p_next, p_next += 1.0) {
- int bin = floor(p_lo);
- double freq = (cumulative_[bin + 1] - cumulative_[bin]) *
- (std::min(p_next, p_hi) - p_lo);
- sum_bin_freq += bin * freq;
- cumul_freq += freq;
- }
- // add 0.5 to give an average for bin mid-points
- return sum_bin_freq / cumul_freq + 0.5;
-}
diff --git a/src/ipa/raspberrypi/controller/histogram.hpp b/src/ipa/raspberrypi/controller/histogram.hpp
deleted file mode 100644
index 90f5ac78..00000000
--- a/src/ipa/raspberrypi/controller/histogram.hpp
+++ /dev/null
@@ -1,44 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * histogram.hpp - histogram calculation interface
- */
-#pragma once
-
-#include <stdint.h>
-#include <vector>
-#include <cassert>
-
-// A simple histogram class, for use in particular to find "quantiles" and
-// averages between "quantiles".
-
-namespace RPiController {
-
-class Histogram
-{
-public:
- template<typename T> Histogram(T *histogram, int num)
- {
- assert(num);
- cumulative_.reserve(num + 1);
- cumulative_.push_back(0);
- for (int i = 0; i < num; i++)
- cumulative_.push_back(cumulative_.back() +
- histogram[i]);
- }
- uint32_t Bins() const { return cumulative_.size() - 1; }
- uint64_t Total() const { return cumulative_[cumulative_.size() - 1]; }
- // Cumulative frequency up to a (fractional) point in a bin.
- uint64_t CumulativeFreq(double bin) const;
- // Return the (fractional) bin of the point q (0 <= q <= 1) through the
- // histogram. Optionally provide limits to help.
- double Quantile(double q, int first = -1, int last = -1) const;
- // Return the average histogram bin value between the two quantiles.
- double InterQuantileMean(double q_lo, double q_hi) const;
-
-private:
- std::vector<uint64_t> cumulative_;
-};
-
-} // namespace RPiController
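
A short sketch of how this class is used: the constructor converts a raw array of bin counts into a cumulative table, after which quantiles and inter-quantile means can be queried. The bin values below are arbitrary:

    #include <cstdint>
    #include <cstdio>

    #include "histogram.hpp"

    void example()
    {
        // Four bins with 10, 20, 40 and 30 counts respectively (100 in total).
        uint32_t bins[] = { 10, 20, 40, 30 };
        RPiController::Histogram h(bins, 4);

        // Cumulative counts are 10, 30, 70, 100, so the median (q = 0.5,
        // i.e. 50 counts in) falls half way through bin 2.
        double median = h.Quantile(0.5);             // 2.5
        double top = h.InterQuantileMean(0.75, 1.0); // mean bin of the brightest quartile
        std::printf("median %g, top-quartile mean %g\n", median, top);
    }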
diff --git a/src/ipa/raspberrypi/controller/lux_status.h b/src/ipa/raspberrypi/controller/lux_status.h
deleted file mode 100644
index 8ccfd933..00000000
--- a/src/ipa/raspberrypi/controller/lux_status.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * lux_status.h - Lux control algorithm status
- */
-#pragma once
-
-// The "lux" algorithm looks at the (AGC) histogram statistics of the frame and
-// estimates the current lux level of the scene. It does this by a simple ratio
-// calculation comparing to a reference image that was taken in known conditions
-// with known statistics and a properly measured lux level. There is a slight
-// problem with aperture, in that it may be variable without the system knowing
-// or being aware of it. In this case an external application may set a
-// "current_aperture" value if it wishes, which would be used in place of the
-// (presumably meaningless) value in the image metadata.
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-struct LuxStatus {
- double lux;
- double aperture;
-};
-
-#ifdef __cplusplus
-}
-#endif
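
The ratio calculation the comment describes can be sketched as follows. The reference values, names, and the way the current frame's exposure and average Y are obtained are all illustrative; this is not the actual rpi.lux implementation:

    #include "lux_status.h"

    LuxStatus estimateLux(double exposureUs, double gain, double averageY)
    {
        // Calibration data for a reference image taken in known conditions.
        const double refExposureUs = 10000.0;
        const double refGain = 1.0;
        const double refAverageY = 0.12;
        const double refLux = 400.0;

        // Scene brightness scales with the measured Y and inversely with the
        // exposure and gain that were used to capture it.
        LuxStatus status;
        status.lux = refLux * (refExposureUs / exposureUs) *
                     (refGain / gain) * (averageY / refAverageY);
        status.aperture = 1.0; // unknown here; see the aperture caveat above
        return status;
    }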
diff --git a/src/ipa/raspberrypi/controller/metadata.hpp b/src/ipa/raspberrypi/controller/metadata.hpp
deleted file mode 100644
index 51e576cf..00000000
--- a/src/ipa/raspberrypi/controller/metadata.hpp
+++ /dev/null
@@ -1,110 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019-2021, Raspberry Pi (Trading) Limited
- *
- * metadata.hpp - general metadata class
- */
-#pragma once
-
-// A simple class for carrying arbitrary metadata, for example about an image.
-
-#include <any>
-#include <map>
-#include <mutex>
-#include <string>
-
-namespace RPiController {
-
-class Metadata
-{
-public:
- Metadata() = default;
-
- Metadata(Metadata const &other)
- {
- std::scoped_lock other_lock(other.mutex_);
- data_ = other.data_;
- }
-
- Metadata(Metadata &&other)
- {
- std::scoped_lock other_lock(other.mutex_);
- data_ = std::move(other.data_);
- other.data_.clear();
- }
-
- template<typename T>
- void Set(std::string const &tag, T const &value)
- {
- std::scoped_lock lock(mutex_);
- data_[tag] = value;
- }
-
- template<typename T>
- int Get(std::string const &tag, T &value) const
- {
- std::scoped_lock lock(mutex_);
- auto it = data_.find(tag);
- if (it == data_.end())
- return -1;
- value = std::any_cast<T>(it->second);
- return 0;
- }
-
- void Clear()
- {
- std::scoped_lock lock(mutex_);
- data_.clear();
- }
-
- Metadata &operator=(Metadata const &other)
- {
- std::scoped_lock lock(mutex_, other.mutex_);
- data_ = other.data_;
- return *this;
- }
-
- Metadata &operator=(Metadata &&other)
- {
- std::scoped_lock lock(mutex_, other.mutex_);
- data_ = std::move(other.data_);
- other.data_.clear();
- return *this;
- }
-
- void Merge(Metadata &other)
- {
- std::scoped_lock lock(mutex_, other.mutex_);
- data_.merge(other.data_);
- }
-
- template<typename T>
- T *GetLocked(std::string const &tag)
- {
- // This allows in-place access to the Metadata contents,
- // for which you should be holding the lock.
- auto it = data_.find(tag);
- if (it == data_.end())
- return nullptr;
- return std::any_cast<T>(&it->second);
- }
-
- template<typename T>
- void SetLocked(std::string const &tag, T const &value)
- {
- // Use this only if you're holding the lock yourself.
- data_[tag] = value;
- }
-
- // Note: use of (lowercase) lock and unlock means you can create scoped
- // locks with the standard lock classes.
- // e.g. std::lock_guard<RPiController::Metadata> lock(metadata)
- void lock() { mutex_.lock(); }
- void unlock() { mutex_.unlock(); }
-
-private:
- mutable std::mutex mutex_;
- std::map<std::string, std::any> data_;
-};
-
-} // namespace RPiController
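
A brief sketch of typical Metadata usage, including the locked-access pattern described in the comments above; the tag name and value are arbitrary:

    #include <iostream>
    #include <mutex>

    #include "metadata.hpp"

    void example()
    {
        RPiController::Metadata metadata;

        // Thread-safe set and get.
        metadata.Set("lux.estimate", 400.0);
        double lux = 0.0;
        if (metadata.Get("lux.estimate", lux) == 0)
            std::cout << "lux " << lux << std::endl;

        // In-place access: take the lock yourself, then use GetLocked().
        {
            std::lock_guard<RPiController::Metadata> lock(metadata);
            double *value = metadata.GetLocked<double>("lux.estimate");
            if (value)
                *value *= 2.0;
        }
    }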
diff --git a/src/ipa/raspberrypi/controller/noise_status.h b/src/ipa/raspberrypi/controller/noise_status.h
deleted file mode 100644
index 8439a402..00000000
--- a/src/ipa/raspberrypi/controller/noise_status.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * noise_status.h - Noise control algorithm status
- */
-#pragma once
-
-// The "noise" algorithm stores an estimate of the noise profile for this image.
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-struct NoiseStatus {
- double noise_constant;
- double noise_slope;
-};
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/src/ipa/raspberrypi/controller/pwl.cpp b/src/ipa/raspberrypi/controller/pwl.cpp
deleted file mode 100644
index 130c820b..00000000
--- a/src/ipa/raspberrypi/controller/pwl.cpp
+++ /dev/null
@@ -1,246 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * pwl.cpp - piecewise linear functions
- */
-
-#include <cassert>
-#include <stdexcept>
-
-#include "pwl.hpp"
-
-using namespace RPiController;
-
-void Pwl::Read(boost::property_tree::ptree const &params)
-{
- for (auto it = params.begin(); it != params.end(); it++) {
- double x = it->second.get_value<double>();
- assert(it == params.begin() || x > points_.back().x);
- it++;
- double y = it->second.get_value<double>();
- points_.push_back(Point(x, y));
- }
- assert(points_.size() >= 2);
-}
-
-void Pwl::Append(double x, double y, const double eps)
-{
- if (points_.empty() || points_.back().x + eps < x)
- points_.push_back(Point(x, y));
-}
-
-void Pwl::Prepend(double x, double y, const double eps)
-{
- if (points_.empty() || points_.front().x - eps > x)
- points_.insert(points_.begin(), Point(x, y));
-}
-
-Pwl::Interval Pwl::Domain() const
-{
- return Interval(points_[0].x, points_[points_.size() - 1].x);
-}
-
-Pwl::Interval Pwl::Range() const
-{
- double lo = points_[0].y, hi = lo;
- for (auto &p : points_)
- lo = std::min(lo, p.y), hi = std::max(hi, p.y);
- return Interval(lo, hi);
-}
-
-bool Pwl::Empty() const
-{
- return points_.empty();
-}
-
-double Pwl::Eval(double x, int *span_ptr, bool update_span) const
-{
- int span = findSpan(x, span_ptr && *span_ptr != -1
- ? *span_ptr
- : points_.size() / 2 - 1);
- if (span_ptr && update_span)
- *span_ptr = span;
- return points_[span].y +
- (x - points_[span].x) * (points_[span + 1].y - points_[span].y) /
- (points_[span + 1].x - points_[span].x);
-}
-
-int Pwl::findSpan(double x, int span) const
-{
- // Pwls are generally small, so linear search may well be faster than
-	// binary, though we could review this if large Pwls start turning up.
- int last_span = points_.size() - 2;
- // some algorithms may call us with span pointing directly at the last
- // control point
- span = std::max(0, std::min(last_span, span));
- while (span < last_span && x >= points_[span + 1].x)
- span++;
- while (span && x < points_[span].x)
- span--;
- return span;
-}
-
-Pwl::PerpType Pwl::Invert(Point const &xy, Point &perp, int &span,
- const double eps) const
-{
- assert(span >= -1);
- bool prev_off_end = false;
- for (span = span + 1; span < (int)points_.size() - 1; span++) {
- Point span_vec = points_[span + 1] - points_[span];
- double t = ((xy - points_[span]) % span_vec) / span_vec.Len2();
- if (t < -eps) // off the start of this span
- {
- if (span == 0) {
- perp = points_[span];
- return PerpType::Start;
- } else if (prev_off_end) {
- perp = points_[span];
- return PerpType::Vertex;
- }
- } else if (t > 1 + eps) // off the end of this span
- {
- if (span == (int)points_.size() - 2) {
- perp = points_[span + 1];
- return PerpType::End;
- }
- prev_off_end = true;
- } else // a true perpendicular
- {
- perp = points_[span] + span_vec * t;
- return PerpType::Perpendicular;
- }
- }
- return PerpType::None;
-}
-
-Pwl Pwl::Inverse(bool *true_inverse, const double eps) const
-{
- bool appended = false, prepended = false, neither = false;
- Pwl inverse;
-
- for (Point const &p : points_) {
- if (inverse.Empty())
- inverse.Append(p.y, p.x, eps);
- else if (std::abs(inverse.points_.back().x - p.y) <= eps ||
- std::abs(inverse.points_.front().x - p.y) <= eps)
- /* do nothing */;
- else if (p.y > inverse.points_.back().x) {
- inverse.Append(p.y, p.x, eps);
- appended = true;
- } else if (p.y < inverse.points_.front().x) {
- inverse.Prepend(p.y, p.x, eps);
- prepended = true;
- } else
- neither = true;
- }
-
- // This is not a proper inverse if we found ourselves putting points
- // onto both ends of the inverse, or if there were points that couldn't
- // go on either.
- if (true_inverse)
- *true_inverse = !(neither || (appended && prepended));
-
- return inverse;
-}
-
-Pwl Pwl::Compose(Pwl const &other, const double eps) const
-{
- double this_x = points_[0].x, this_y = points_[0].y;
- int this_span = 0, other_span = other.findSpan(this_y, 0);
- Pwl result({ { this_x, other.Eval(this_y, &other_span, false) } });
- while (this_span != (int)points_.size() - 1) {
- double dx = points_[this_span + 1].x - points_[this_span].x,
- dy = points_[this_span + 1].y - points_[this_span].y;
- if (abs(dy) > eps &&
- other_span + 1 < (int)other.points_.size() &&
- points_[this_span + 1].y >=
- other.points_[other_span + 1].x + eps) {
- // next control point in result will be where this
- // function's y reaches the next span in other
- this_x = points_[this_span].x +
- (other.points_[other_span + 1].x -
- points_[this_span].y) * dx / dy;
- this_y = other.points_[++other_span].x;
- } else if (abs(dy) > eps && other_span > 0 &&
- points_[this_span + 1].y <=
- other.points_[other_span - 1].x - eps) {
- // next control point in result will be where this
- // function's y reaches the previous span in other
- this_x = points_[this_span].x +
- (other.points_[other_span + 1].x -
- points_[this_span].y) * dx / dy;
- this_y = other.points_[--other_span].x;
- } else {
- // we stay in the same span in other
- this_span++;
- this_x = points_[this_span].x,
- this_y = points_[this_span].y;
- }
- result.Append(this_x, other.Eval(this_y, &other_span, false),
- eps);
- }
- return result;
-}
-
-void Pwl::Map(std::function<void(double x, double y)> f) const
-{
- for (auto &pt : points_)
- f(pt.x, pt.y);
-}
-
-void Pwl::Map2(Pwl const &pwl0, Pwl const &pwl1,
- std::function<void(double x, double y0, double y1)> f)
-{
- int span0 = 0, span1 = 0;
- double x = std::min(pwl0.points_[0].x, pwl1.points_[0].x);
- f(x, pwl0.Eval(x, &span0, false), pwl1.Eval(x, &span1, false));
- while (span0 < (int)pwl0.points_.size() - 1 ||
- span1 < (int)pwl1.points_.size() - 1) {
- if (span0 == (int)pwl0.points_.size() - 1)
- x = pwl1.points_[++span1].x;
- else if (span1 == (int)pwl1.points_.size() - 1)
- x = pwl0.points_[++span0].x;
- else if (pwl0.points_[span0 + 1].x > pwl1.points_[span1 + 1].x)
- x = pwl1.points_[++span1].x;
- else
- x = pwl0.points_[++span0].x;
- f(x, pwl0.Eval(x, &span0, false), pwl1.Eval(x, &span1, false));
- }
-}
-
-Pwl Pwl::Combine(Pwl const &pwl0, Pwl const &pwl1,
- std::function<double(double x, double y0, double y1)> f,
- const double eps)
-{
- Pwl result;
- Map2(pwl0, pwl1, [&](double x, double y0, double y1) {
- result.Append(x, f(x, y0, y1), eps);
- });
- return result;
-}
-
-void Pwl::MatchDomain(Interval const &domain, bool clip, const double eps)
-{
- int span = 0;
- Prepend(domain.start, Eval(clip ? points_[0].x : domain.start, &span),
- eps);
- span = points_.size() - 2;
- Append(domain.end, Eval(clip ? points_.back().x : domain.end, &span),
- eps);
-}
-
-Pwl &Pwl::operator*=(double d)
-{
- for (auto &pt : points_)
- pt.y *= d;
- return *this;
-}
-
-void Pwl::Debug(FILE *fp) const
-{
- fprintf(fp, "Pwl {\n");
- for (auto &p : points_)
- fprintf(fp, "\t(%g, %g)\n", p.x, p.y);
- fprintf(fp, "}\n");
-}
diff --git a/src/ipa/raspberrypi/controller/pwl.hpp b/src/ipa/raspberrypi/controller/pwl.hpp
deleted file mode 100644
index 484672f6..00000000
--- a/src/ipa/raspberrypi/controller/pwl.hpp
+++ /dev/null
@@ -1,112 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * pwl.hpp - piecewise linear functions interface
- */
-#pragma once
-
-#include <math.h>
-#include <vector>
-
-#include <boost/property_tree/ptree.hpp>
-
-namespace RPiController {
-
-class Pwl
-{
-public:
- struct Interval {
- Interval(double _start, double _end) : start(_start), end(_end)
- {
- }
- double start, end;
- bool Contains(double value)
- {
- return value >= start && value <= end;
- }
- double Clip(double value)
- {
- return value < start ? start
- : (value > end ? end : value);
- }
- double Len() const { return end - start; }
- };
- struct Point {
- Point() : x(0), y(0) {}
- Point(double _x, double _y) : x(_x), y(_y) {}
- double x, y;
- Point operator-(Point const &p) const
- {
- return Point(x - p.x, y - p.y);
- }
- Point operator+(Point const &p) const
- {
- return Point(x + p.x, y + p.y);
- }
- double operator%(Point const &p) const
- {
- return x * p.x + y * p.y;
- }
- Point operator*(double f) const { return Point(x * f, y * f); }
- Point operator/(double f) const { return Point(x / f, y / f); }
- double Len2() const { return x * x + y * y; }
- double Len() const { return sqrt(Len2()); }
- };
- Pwl() {}
- Pwl(std::vector<Point> const &points) : points_(points) {}
- void Read(boost::property_tree::ptree const &params);
- void Append(double x, double y, const double eps = 1e-6);
- void Prepend(double x, double y, const double eps = 1e-6);
- Interval Domain() const;
- Interval Range() const;
- bool Empty() const;
- // Evaluate Pwl, optionally supplying an initial guess for the
-	// "span". The "span" may optionally be updated. If you want to know
- // the "span" value but don't have an initial guess you can set it to
- // -1.
- double Eval(double x, int *span_ptr = nullptr,
- bool update_span = true) const;
- // Find perpendicular closest to xy, starting from span+1 so you can
- // call it repeatedly to check for multiple closest points (set span to
- // -1 on the first call). Also returns "pseudo" perpendiculars; see
- // PerpType enum.
- enum class PerpType {
- None, // no perpendicular found
- Start, // start of Pwl is closest point
- End, // end of Pwl is closest point
- Vertex, // vertex of Pwl is closest point
- Perpendicular // true perpendicular found
- };
- PerpType Invert(Point const &xy, Point &perp, int &span,
- const double eps = 1e-6) const;
- // Compute the inverse function. Indicate if it is a proper (true)
- // inverse, or only a best effort (e.g. input was non-monotonic).
- Pwl Inverse(bool *true_inverse = nullptr, const double eps = 1e-6) const;
- // Compose two Pwls together, doing "this" first and "other" after.
- Pwl Compose(Pwl const &other, const double eps = 1e-6) const;
- // Apply function to (x,y) values at every control point.
- void Map(std::function<void(double x, double y)> f) const;
- // Apply function to (x, y0, y1) values wherever either Pwl has a
- // control point.
- static void Map2(Pwl const &pwl0, Pwl const &pwl1,
- std::function<void(double x, double y0, double y1)> f);
- // Combine two Pwls, meaning we create a new Pwl where the y values are
- // given by running f wherever either has a knot.
- static Pwl
- Combine(Pwl const &pwl0, Pwl const &pwl1,
- std::function<double(double x, double y0, double y1)> f,
- const double eps = 1e-6);
-	// Make "this" match (at least) the given domain. Any extension may be
- // clipped or linear.
- void MatchDomain(Interval const &domain, bool clip = true,
- const double eps = 1e-6);
- Pwl &operator*=(double d);
- void Debug(FILE *fp = stdout) const;
-
-private:
- int findSpan(double x, int span) const;
- std::vector<Point> points_;
-};
-
-} // namespace RPiController
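
A small sketch of how a Pwl behaves: build one from a few control points, evaluate it, and invert it. The points are chosen purely for illustration:

    #include <cstdio>

    #include "pwl.hpp"

    void example()
    {
        // A gamma-like curve through three control points.
        RPiController::Pwl curve({ { 0.0, 0.0 }, { 0.5, 0.8 }, { 1.0, 1.0 } });

        double y = curve.Eval(0.25); // 0.4, interpolated on the first span

        bool trueInverse = false;
        RPiController::Pwl inverse = curve.Inverse(&trueInverse);
        // The curve is monotonic, so this is an exact inverse: f^-1(0.8) = 0.5.
        std::printf("f(0.25) = %g, exact %d, f^-1(0.8) = %g\n",
                    y, trueInverse, inverse.Eval(0.8));
    }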
diff --git a/src/ipa/raspberrypi/controller/rpi/agc.cpp b/src/ipa/raspberrypi/controller/rpi/agc.cpp
deleted file mode 100644
index f6a9cb0a..00000000
--- a/src/ipa/raspberrypi/controller/rpi/agc.cpp
+++ /dev/null
@@ -1,797 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * agc.cpp - AGC/AEC control algorithm
- */
-
-#include <map>
-
-#include <linux/bcm2835-isp.h>
-
-#include <libcamera/base/log.h>
-
-#include "../awb_status.h"
-#include "../device_status.h"
-#include "../histogram.hpp"
-#include "../lux_status.h"
-#include "../metadata.hpp"
-
-#include "agc.hpp"
-
-using namespace RPiController;
-using namespace libcamera;
-using libcamera::utils::Duration;
-using namespace std::literals::chrono_literals;
-
-LOG_DEFINE_CATEGORY(RPiAgc)
-
-#define NAME "rpi.agc"
-
-#define PIPELINE_BITS 13 // seems to be a 13-bit pipeline
-
-void AgcMeteringMode::Read(boost::property_tree::ptree const &params)
-{
- int num = 0;
- for (auto &p : params.get_child("weights")) {
- if (num == AGC_STATS_SIZE)
- throw std::runtime_error("AgcConfig: too many weights");
- weights[num++] = p.second.get_value<double>();
- }
- if (num != AGC_STATS_SIZE)
- throw std::runtime_error("AgcConfig: insufficient weights");
-}
-
-static std::string
-read_metering_modes(std::map<std::string, AgcMeteringMode> &metering_modes,
- boost::property_tree::ptree const &params)
-{
- std::string first;
- for (auto &p : params) {
- AgcMeteringMode metering_mode;
- metering_mode.Read(p.second);
- metering_modes[p.first] = std::move(metering_mode);
- if (first.empty())
- first = p.first;
- }
- return first;
-}
-
-static int read_list(std::vector<double> &list,
- boost::property_tree::ptree const &params)
-{
- for (auto &p : params)
- list.push_back(p.second.get_value<double>());
- return list.size();
-}
-
-static int read_list(std::vector<Duration> &list,
- boost::property_tree::ptree const &params)
-{
- for (auto &p : params)
- list.push_back(p.second.get_value<double>() * 1us);
- return list.size();
-}
-
-void AgcExposureMode::Read(boost::property_tree::ptree const &params)
-{
- int num_shutters = read_list(shutter, params.get_child("shutter"));
- int num_ags = read_list(gain, params.get_child("gain"));
- if (num_shutters < 2 || num_ags < 2)
- throw std::runtime_error(
- "AgcConfig: must have at least two entries in exposure profile");
- if (num_shutters != num_ags)
- throw std::runtime_error(
- "AgcConfig: expect same number of exposure and gain entries in exposure profile");
-}
-
-static std::string
-read_exposure_modes(std::map<std::string, AgcExposureMode> &exposure_modes,
- boost::property_tree::ptree const &params)
-{
- std::string first;
- for (auto &p : params) {
- AgcExposureMode exposure_mode;
- exposure_mode.Read(p.second);
- exposure_modes[p.first] = std::move(exposure_mode);
- if (first.empty())
- first = p.first;
- }
- return first;
-}
-
-void AgcConstraint::Read(boost::property_tree::ptree const &params)
-{
- std::string bound_string = params.get<std::string>("bound", "");
- transform(bound_string.begin(), bound_string.end(),
- bound_string.begin(), ::toupper);
- if (bound_string != "UPPER" && bound_string != "LOWER")
- throw std::runtime_error(
- "AGC constraint type should be UPPER or LOWER");
- bound = bound_string == "UPPER" ? Bound::UPPER : Bound::LOWER;
- q_lo = params.get<double>("q_lo");
- q_hi = params.get<double>("q_hi");
- Y_target.Read(params.get_child("y_target"));
-}
-
-static AgcConstraintMode
-read_constraint_mode(boost::property_tree::ptree const &params)
-{
- AgcConstraintMode mode;
- for (auto &p : params) {
- AgcConstraint constraint;
- constraint.Read(p.second);
- mode.push_back(std::move(constraint));
- }
- return mode;
-}
-
-static std::string read_constraint_modes(
- std::map<std::string, AgcConstraintMode> &constraint_modes,
- boost::property_tree::ptree const &params)
-{
- std::string first;
- for (auto &p : params) {
- constraint_modes[p.first] = read_constraint_mode(p.second);
- if (first.empty())
- first = p.first;
- }
- return first;
-}
-
-void AgcConfig::Read(boost::property_tree::ptree const &params)
-{
- LOG(RPiAgc, Debug) << "AgcConfig";
- default_metering_mode = read_metering_modes(
- metering_modes, params.get_child("metering_modes"));
- default_exposure_mode = read_exposure_modes(
- exposure_modes, params.get_child("exposure_modes"));
- default_constraint_mode = read_constraint_modes(
- constraint_modes, params.get_child("constraint_modes"));
- Y_target.Read(params.get_child("y_target"));
- speed = params.get<double>("speed", 0.2);
- startup_frames = params.get<uint16_t>("startup_frames", 10);
- convergence_frames = params.get<unsigned int>("convergence_frames", 6);
- fast_reduce_threshold =
- params.get<double>("fast_reduce_threshold", 0.4);
- base_ev = params.get<double>("base_ev", 1.0);
- // Start with quite a low value as ramping up is easier than ramping down.
- default_exposure_time = params.get<double>("default_exposure_time", 1000) * 1us;
- default_analogue_gain = params.get<double>("default_analogue_gain", 1.0);
-}
-
-Agc::ExposureValues::ExposureValues()
- : shutter(0s), analogue_gain(0),
- total_exposure(0s), total_exposure_no_dg(0s)
-{
-}
-
-Agc::Agc(Controller *controller)
- : AgcAlgorithm(controller), metering_mode_(nullptr),
- exposure_mode_(nullptr), constraint_mode_(nullptr),
- frame_count_(0), lock_count_(0),
- last_target_exposure_(0s), last_sensitivity_(0.0),
- ev_(1.0), flicker_period_(0s),
- max_shutter_(0s), fixed_shutter_(0s), fixed_analogue_gain_(0.0)
-{
- memset(&awb_, 0, sizeof(awb_));
- // Setting status_.total_exposure_value_ to zero initially tells us
- // it's not been calculated yet (i.e. Process hasn't yet run).
- memset(&status_, 0, sizeof(status_));
- status_.ev = ev_;
-}
-
-char const *Agc::Name() const
-{
- return NAME;
-}
-
-void Agc::Read(boost::property_tree::ptree const &params)
-{
- LOG(RPiAgc, Debug) << "Agc";
- config_.Read(params);
- // Set the config's defaults (which are the first ones it read) as our
- // current modes, until someone changes them. (they're all known to
- // exist at this point)
- metering_mode_name_ = config_.default_metering_mode;
- metering_mode_ = &config_.metering_modes[metering_mode_name_];
- exposure_mode_name_ = config_.default_exposure_mode;
- exposure_mode_ = &config_.exposure_modes[exposure_mode_name_];
- constraint_mode_name_ = config_.default_constraint_mode;
- constraint_mode_ = &config_.constraint_modes[constraint_mode_name_];
- // Set up the "last shutter/gain" values, in case AGC starts "disabled".
- status_.shutter_time = config_.default_exposure_time;
- status_.analogue_gain = config_.default_analogue_gain;
-}
-
-bool Agc::IsPaused() const
-{
- return false;
-}
-
-void Agc::Pause()
-{
- fixed_shutter_ = status_.shutter_time;
- fixed_analogue_gain_ = status_.analogue_gain;
-}
-
-void Agc::Resume()
-{
- fixed_shutter_ = 0s;
- fixed_analogue_gain_ = 0;
-}
-
-unsigned int Agc::GetConvergenceFrames() const
-{
- // If shutter and gain have been explicitly set, there is no
- // convergence to happen, so no need to drop any frames - return zero.
- if (fixed_shutter_ && fixed_analogue_gain_)
- return 0;
- else
- return config_.convergence_frames;
-}
-
-void Agc::SetEv(double ev)
-{
- ev_ = ev;
-}
-
-void Agc::SetFlickerPeriod(Duration flicker_period)
-{
- flicker_period_ = flicker_period;
-}
-
-void Agc::SetMaxShutter(Duration max_shutter)
-{
- max_shutter_ = max_shutter;
-}
-
-void Agc::SetFixedShutter(Duration fixed_shutter)
-{
- fixed_shutter_ = fixed_shutter;
- // Set this in case someone calls Pause() straight after.
- status_.shutter_time = clipShutter(fixed_shutter_);
-}
-
-void Agc::SetFixedAnalogueGain(double fixed_analogue_gain)
-{
- fixed_analogue_gain_ = fixed_analogue_gain;
- // Set this in case someone calls Pause() straight after.
- status_.analogue_gain = fixed_analogue_gain;
-}
-
-void Agc::SetMeteringMode(std::string const &metering_mode_name)
-{
- metering_mode_name_ = metering_mode_name;
-}
-
-void Agc::SetExposureMode(std::string const &exposure_mode_name)
-{
- exposure_mode_name_ = exposure_mode_name;
-}
-
-void Agc::SetConstraintMode(std::string const &constraint_mode_name)
-{
- constraint_mode_name_ = constraint_mode_name;
-}
-
-void Agc::SwitchMode(CameraMode const &camera_mode,
- Metadata *metadata)
-{
- /* AGC expects the mode sensitivity always to be non-zero. */
- ASSERT(camera_mode.sensitivity);
-
- housekeepConfig();
-
- Duration fixed_shutter = clipShutter(fixed_shutter_);
- if (fixed_shutter && fixed_analogue_gain_) {
- // We're going to reset the algorithm here with these fixed values.
-
- fetchAwbStatus(metadata);
- double min_colour_gain = std::min({ awb_.gain_r, awb_.gain_g, awb_.gain_b, 1.0 });
- ASSERT(min_colour_gain != 0.0);
-
- // This is the equivalent of computeTargetExposure and applyDigitalGain.
- target_.total_exposure_no_dg = fixed_shutter * fixed_analogue_gain_;
- target_.total_exposure = target_.total_exposure_no_dg / min_colour_gain;
-
- // Equivalent of filterExposure. This resets any "history".
- filtered_ = target_;
-
- // Equivalent of divideUpExposure.
- filtered_.shutter = fixed_shutter;
- filtered_.analogue_gain = fixed_analogue_gain_;
- } else if (status_.total_exposure_value) {
- // On a mode switch, various things could happen:
- // - the exposure profile might change
- // - a fixed exposure or gain might be set
- // - the new mode's sensitivity might be different
- // We cope with the last of these by scaling the target values. After
- // that we just need to re-divide the exposure/gain according to the
- // current exposure profile, which takes care of everything else.
-
- double ratio = last_sensitivity_ / camera_mode.sensitivity;
- target_.total_exposure_no_dg *= ratio;
- target_.total_exposure *= ratio;
- filtered_.total_exposure_no_dg *= ratio;
- filtered_.total_exposure *= ratio;
-
- divideUpExposure();
- } else {
- // We come through here on startup, when at least one of the shutter
- // or gain has not been fixed. We must still write those values out so
- // that they will be applied immediately. We supply some arbitrary defaults
- // for any that weren't set.
-
- // Equivalent of divideUpExposure.
- filtered_.shutter = fixed_shutter ? fixed_shutter : config_.default_exposure_time;
- filtered_.analogue_gain = fixed_analogue_gain_ ? fixed_analogue_gain_ : config_.default_analogue_gain;
- }
-
- writeAndFinish(metadata, false);
-
- // We must remember the sensitivity of this mode for the next SwitchMode.
- last_sensitivity_ = camera_mode.sensitivity;
-}
-
-void Agc::Prepare(Metadata *image_metadata)
-{
- status_.digital_gain = 1.0;
- fetchAwbStatus(image_metadata); // always fetch it so that Process knows it's been done
-
- if (status_.total_exposure_value) {
- // Process has run, so we have meaningful values.
- DeviceStatus device_status;
- if (image_metadata->Get("device.status", device_status) == 0) {
- Duration actual_exposure = device_status.shutter_speed *
- device_status.analogue_gain;
- if (actual_exposure) {
- status_.digital_gain =
- status_.total_exposure_value /
- actual_exposure;
- LOG(RPiAgc, Debug) << "Want total exposure " << status_.total_exposure_value;
- // Never ask for a gain < 1.0, and also impose
- // some upper limit. Make it customisable?
- status_.digital_gain = std::max(
- 1.0,
- std::min(status_.digital_gain, 4.0));
- LOG(RPiAgc, Debug) << "Actual exposure " << actual_exposure;
- LOG(RPiAgc, Debug) << "Use digital_gain " << status_.digital_gain;
- LOG(RPiAgc, Debug) << "Effective exposure "
- << actual_exposure * status_.digital_gain;
- // Decide whether AEC/AGC has converged.
- updateLockStatus(device_status);
- }
- } else
- LOG(RPiAgc, Warning) << Name() << ": no device metadata";
- image_metadata->Set("agc.status", status_);
- }
-}
-
-void Agc::Process(StatisticsPtr &stats, Metadata *image_metadata)
-{
- frame_count_++;
- // First a little bit of housekeeping, fetching up-to-date settings and
- // configuration, that kind of thing.
- housekeepConfig();
- // Get the current exposure values for the frame that's just arrived.
- fetchCurrentExposure(image_metadata);
- // Compute the total gain we require relative to the current exposure.
- double gain, target_Y;
- computeGain(stats.get(), image_metadata, gain, target_Y);
- // Now compute the target (final) exposure which we think we want.
- computeTargetExposure(gain);
- // Some of the exposure has to be applied as digital gain, so work out
- // what that is. This function also tells us whether it's decided to
- // "desaturate" the image more quickly.
- bool desaturate = applyDigitalGain(gain, target_Y);
- // The results have to be filtered so as not to change too rapidly.
- filterExposure(desaturate);
- // The last thing is to divide up the exposure value into a shutter time
- // and analogue_gain, according to the current exposure mode.
- divideUpExposure();
- // Finally advertise what we've done.
- writeAndFinish(image_metadata, desaturate);
-}
-
-void Agc::updateLockStatus(DeviceStatus const &device_status)
-{
- const double ERROR_FACTOR = 0.10; // make these customisable?
- const int MAX_LOCK_COUNT = 5;
- // Reset "lock count" when we exceed this multiple of ERROR_FACTOR
- const double RESET_MARGIN = 1.5;
-
- // Add 200us to the exposure time error to allow for line quantisation.
- Duration exposure_error = last_device_status_.shutter_speed * ERROR_FACTOR + 200us;
- double gain_error = last_device_status_.analogue_gain * ERROR_FACTOR;
- Duration target_error = last_target_exposure_ * ERROR_FACTOR;
-
- // Note that we don't know the exposure/gain limits of the sensor, so
- // the values we keep requesting may be unachievable. For this reason
- // we only insist that we're close to values in the past few frames.
- if (device_status.shutter_speed > last_device_status_.shutter_speed - exposure_error &&
- device_status.shutter_speed < last_device_status_.shutter_speed + exposure_error &&
- device_status.analogue_gain > last_device_status_.analogue_gain - gain_error &&
- device_status.analogue_gain < last_device_status_.analogue_gain + gain_error &&
- status_.target_exposure_value > last_target_exposure_ - target_error &&
- status_.target_exposure_value < last_target_exposure_ + target_error)
- lock_count_ = std::min(lock_count_ + 1, MAX_LOCK_COUNT);
- else if (device_status.shutter_speed < last_device_status_.shutter_speed - RESET_MARGIN * exposure_error ||
- device_status.shutter_speed > last_device_status_.shutter_speed + RESET_MARGIN * exposure_error ||
- device_status.analogue_gain < last_device_status_.analogue_gain - RESET_MARGIN * gain_error ||
- device_status.analogue_gain > last_device_status_.analogue_gain + RESET_MARGIN * gain_error ||
- status_.target_exposure_value < last_target_exposure_ - RESET_MARGIN * target_error ||
- status_.target_exposure_value > last_target_exposure_ + RESET_MARGIN * target_error)
- lock_count_ = 0;
-
- last_device_status_ = device_status;
- last_target_exposure_ = status_.target_exposure_value;
-
- LOG(RPiAgc, Debug) << "Lock count updated to " << lock_count_;
- status_.locked = lock_count_ == MAX_LOCK_COUNT;
-}
-
-static void copy_string(std::string const &s, char *d, size_t size)
-{
- size_t length = s.copy(d, size - 1);
- d[length] = '\0';
-}
-
-void Agc::housekeepConfig()
-{
- // First fetch all the up-to-date settings, so no one else has to do it.
- status_.ev = ev_;
- status_.fixed_shutter = clipShutter(fixed_shutter_);
- status_.fixed_analogue_gain = fixed_analogue_gain_;
- status_.flicker_period = flicker_period_;
- LOG(RPiAgc, Debug) << "ev " << status_.ev << " fixed_shutter "
- << status_.fixed_shutter << " fixed_analogue_gain "
- << status_.fixed_analogue_gain;
- // Make sure the "mode" pointers point to the up-to-date things, if
- // they've changed.
- if (strcmp(metering_mode_name_.c_str(), status_.metering_mode)) {
- auto it = config_.metering_modes.find(metering_mode_name_);
- if (it == config_.metering_modes.end())
- throw std::runtime_error("Agc: no metering mode " +
- metering_mode_name_);
- metering_mode_ = &it->second;
- copy_string(metering_mode_name_, status_.metering_mode,
- sizeof(status_.metering_mode));
- }
- if (strcmp(exposure_mode_name_.c_str(), status_.exposure_mode)) {
- auto it = config_.exposure_modes.find(exposure_mode_name_);
- if (it == config_.exposure_modes.end())
- throw std::runtime_error("Agc: no exposure profile " +
- exposure_mode_name_);
- exposure_mode_ = &it->second;
- copy_string(exposure_mode_name_, status_.exposure_mode,
- sizeof(status_.exposure_mode));
- }
- if (strcmp(constraint_mode_name_.c_str(), status_.constraint_mode)) {
- auto it =
- config_.constraint_modes.find(constraint_mode_name_);
- if (it == config_.constraint_modes.end())
- throw std::runtime_error("Agc: no constraint list " +
- constraint_mode_name_);
- constraint_mode_ = &it->second;
- copy_string(constraint_mode_name_, status_.constraint_mode,
- sizeof(status_.constraint_mode));
- }
- LOG(RPiAgc, Debug) << "exposure_mode "
- << exposure_mode_name_ << " constraint_mode "
- << constraint_mode_name_ << " metering_mode "
- << metering_mode_name_;
-}
-
-void Agc::fetchCurrentExposure(Metadata *image_metadata)
-{
- std::unique_lock<Metadata> lock(*image_metadata);
- DeviceStatus *device_status =
- image_metadata->GetLocked<DeviceStatus>("device.status");
- if (!device_status)
- throw std::runtime_error("Agc: no device metadata");
- current_.shutter = device_status->shutter_speed;
- current_.analogue_gain = device_status->analogue_gain;
- AgcStatus *agc_status =
- image_metadata->GetLocked<AgcStatus>("agc.status");
- current_.total_exposure = agc_status ? agc_status->total_exposure_value : 0s;
- current_.total_exposure_no_dg = current_.shutter * current_.analogue_gain;
-}
-
-void Agc::fetchAwbStatus(Metadata *image_metadata)
-{
- awb_.gain_r = 1.0; // in case not found in metadata
- awb_.gain_g = 1.0;
- awb_.gain_b = 1.0;
- if (image_metadata->Get("awb.status", awb_) != 0)
- LOG(RPiAgc, Debug) << "Agc: no AWB status found";
-}
-
-static double compute_initial_Y(bcm2835_isp_stats *stats, AwbStatus const &awb,
- double weights[], double gain)
-{
- bcm2835_isp_stats_region *regions = stats->agc_stats;
- // Note how the calculation below means that equal weights give you
- // "average" metering (i.e. all pixels equally important).
- double R_sum = 0, G_sum = 0, B_sum = 0, pixel_sum = 0;
- for (int i = 0; i < AGC_STATS_SIZE; i++) {
- double counted = regions[i].counted;
- double r_sum = std::min(regions[i].r_sum * gain, ((1 << PIPELINE_BITS) - 1) * counted);
- double g_sum = std::min(regions[i].g_sum * gain, ((1 << PIPELINE_BITS) - 1) * counted);
- double b_sum = std::min(regions[i].b_sum * gain, ((1 << PIPELINE_BITS) - 1) * counted);
- R_sum += r_sum * weights[i];
- G_sum += g_sum * weights[i];
- B_sum += b_sum * weights[i];
- pixel_sum += counted * weights[i];
- }
- if (pixel_sum == 0.0) {
- LOG(RPiAgc, Warning) << "compute_initial_Y: pixel_sum is zero";
- return 0;
- }
- double Y_sum = R_sum * awb.gain_r * .299 +
- G_sum * awb.gain_g * .587 +
- B_sum * awb.gain_b * .114;
- return Y_sum / pixel_sum / (1 << PIPELINE_BITS);
-}
-
-// We handle extra gain through EV by adjusting our Y targets. However, you
-// simply can't monitor histograms once they get very close to (or beyond!)
-// saturation, so we clamp the Y targets to this value. It does mean that EV
-// increases don't necessarily do quite what you might expect in certain
-// (contrived) cases.
-
-#define EV_GAIN_Y_TARGET_LIMIT 0.9
-
-static double constraint_compute_gain(AgcConstraint &c, Histogram &h,
- double lux, double ev_gain,
- double &target_Y)
-{
- target_Y = c.Y_target.Eval(c.Y_target.Domain().Clip(lux));
- target_Y = std::min(EV_GAIN_Y_TARGET_LIMIT, target_Y * ev_gain);
- double iqm = h.InterQuantileMean(c.q_lo, c.q_hi);
- return (target_Y * NUM_HISTOGRAM_BINS) / iqm;
-}
-
-void Agc::computeGain(bcm2835_isp_stats *statistics, Metadata *image_metadata,
- double &gain, double &target_Y)
-{
- struct LuxStatus lux = {};
- lux.lux = 400; // default lux level to 400 in case no metadata found
- if (image_metadata->Get("lux.status", lux) != 0)
- LOG(RPiAgc, Warning) << "Agc: no lux level found";
- Histogram h(statistics->hist[0].g_hist, NUM_HISTOGRAM_BINS);
- double ev_gain = status_.ev * config_.base_ev;
- // The initial gain and target_Y come from some of the regions. After
- // that we consider the histogram constraints.
- target_Y =
- config_.Y_target.Eval(config_.Y_target.Domain().Clip(lux.lux));
- target_Y = std::min(EV_GAIN_Y_TARGET_LIMIT, target_Y * ev_gain);
-
- // Do this calculation a few times as brightness increase can be
- // non-linear when there are saturated regions.
- gain = 1.0;
- for (int i = 0; i < 8; i++) {
- double initial_Y = compute_initial_Y(statistics, awb_,
- metering_mode_->weights, gain);
- double extra_gain = std::min(10.0, target_Y / (initial_Y + .001));
- gain *= extra_gain;
- LOG(RPiAgc, Debug) << "Initial Y " << initial_Y << " target " << target_Y
- << " gives gain " << gain;
- if (extra_gain < 1.01) // close enough
- break;
- }
-
- for (auto &c : *constraint_mode_) {
- double new_target_Y;
- double new_gain =
- constraint_compute_gain(c, h, lux.lux, ev_gain,
- new_target_Y);
- LOG(RPiAgc, Debug) << "Constraint has target_Y "
- << new_target_Y << " giving gain " << new_gain;
- if (c.bound == AgcConstraint::Bound::LOWER &&
- new_gain > gain) {
- LOG(RPiAgc, Debug) << "Lower bound constraint adopted";
- gain = new_gain, target_Y = new_target_Y;
- } else if (c.bound == AgcConstraint::Bound::UPPER &&
- new_gain < gain) {
- LOG(RPiAgc, Debug) << "Upper bound constraint adopted";
- gain = new_gain, target_Y = new_target_Y;
- }
- }
- LOG(RPiAgc, Debug) << "Final gain " << gain << " (target_Y " << target_Y << " ev "
- << status_.ev << " base_ev " << config_.base_ev
- << ")";
-}
-
-void Agc::computeTargetExposure(double gain)
-{
- if (status_.fixed_shutter && status_.fixed_analogue_gain) {
- // When ag and shutter are both fixed, we need to drive the
- // total exposure so that we end up with a digital gain of at least
- // 1/min_colour_gain. Otherwise we'd desaturate channels causing
- // white to go cyan or magenta.
- double min_colour_gain = std::min({ awb_.gain_r, awb_.gain_g, awb_.gain_b, 1.0 });
- ASSERT(min_colour_gain != 0.0);
- target_.total_exposure =
- status_.fixed_shutter * status_.fixed_analogue_gain / min_colour_gain;
- } else {
- // The statistics reflect the image without digital gain, so the final
- // total exposure we're aiming for is:
- target_.total_exposure = current_.total_exposure_no_dg * gain;
- // The final target exposure is also limited to what the exposure
- // mode allows.
- Duration max_shutter = status_.fixed_shutter
- ? status_.fixed_shutter
- : exposure_mode_->shutter.back();
- max_shutter = clipShutter(max_shutter);
- Duration max_total_exposure =
- max_shutter *
- (status_.fixed_analogue_gain != 0.0
- ? status_.fixed_analogue_gain
- : exposure_mode_->gain.back());
- target_.total_exposure = std::min(target_.total_exposure,
- max_total_exposure);
- }
- LOG(RPiAgc, Debug) << "Target total_exposure " << target_.total_exposure;
-}
-
-bool Agc::applyDigitalGain(double gain, double target_Y)
-{
- double min_colour_gain = std::min({ awb_.gain_r, awb_.gain_g, awb_.gain_b, 1.0 });
- ASSERT(min_colour_gain != 0.0);
- double dg = 1.0 / min_colour_gain;
- // I think this pipeline subtracts black level and rescales before we
- // get the stats, so no need to worry about it.
- LOG(RPiAgc, Debug) << "after AWB, target dg " << dg << " gain " << gain
- << " target_Y " << target_Y;
- // Finally, if we're trying to reduce exposure but the target_Y is
- // "close" to 1.0, then the gain computed for that constraint will be
- // only slightly less than one, because the measured Y can never be
- // larger than 1.0. When this happens, demand a large digital gain so
- // that the exposure can be reduced, de-saturating the image much more
- // quickly (and we then approach the correct value more quickly from
- // below).
- bool desaturate = target_Y > config_.fast_reduce_threshold &&
- gain < sqrt(target_Y);
- if (desaturate)
- dg /= config_.fast_reduce_threshold;
- LOG(RPiAgc, Debug) << "Digital gain " << dg << " desaturate? " << desaturate;
- target_.total_exposure_no_dg = target_.total_exposure / dg;
- LOG(RPiAgc, Debug) << "Target total_exposure_no_dg " << target_.total_exposure_no_dg;
- return desaturate;
-}
-
-void Agc::filterExposure(bool desaturate)
-{
- double speed = config_.speed;
- // AGC adapts instantly if both shutter and gain are directly specified
- // or we're in the startup phase.
- if ((status_.fixed_shutter && status_.fixed_analogue_gain) ||
- frame_count_ <= config_.startup_frames)
- speed = 1.0;
- if (!filtered_.total_exposure) {
- filtered_.total_exposure = target_.total_exposure;
- filtered_.total_exposure_no_dg = target_.total_exposure_no_dg;
- } else {
- // If close to the result go faster, to save making so many
- // micro-adjustments on the way. (Make this customisable?)
- if (filtered_.total_exposure < 1.2 * target_.total_exposure &&
- filtered_.total_exposure > 0.8 * target_.total_exposure)
- speed = sqrt(speed);
- filtered_.total_exposure = speed * target_.total_exposure +
- filtered_.total_exposure * (1.0 - speed);
-		// When desaturating, take a big jump down in exposure_no_dg,
- // which we'll hide with digital gain.
- if (desaturate)
- filtered_.total_exposure_no_dg =
- target_.total_exposure_no_dg;
- else
- filtered_.total_exposure_no_dg =
- speed * target_.total_exposure_no_dg +
- filtered_.total_exposure_no_dg * (1.0 - speed);
- }
- // We can't let the no_dg exposure deviate too far below the
- // total exposure, as there might not be enough digital gain available
- // in the ISP to hide it (which will cause nasty oscillation).
- if (filtered_.total_exposure_no_dg <
- filtered_.total_exposure * config_.fast_reduce_threshold)
- filtered_.total_exposure_no_dg = filtered_.total_exposure *
- config_.fast_reduce_threshold;
- LOG(RPiAgc, Debug) << "After filtering, total_exposure " << filtered_.total_exposure
- << " no dg " << filtered_.total_exposure_no_dg;
-}
-
-void Agc::divideUpExposure()
-{
- // Sending the fixed shutter/gain cases through the same code may seem
-	// unnecessary, but it will make more sense when we extend this to cover
- // variable aperture.
- Duration exposure_value = filtered_.total_exposure_no_dg;
- Duration shutter_time;
- double analogue_gain;
- shutter_time = status_.fixed_shutter
- ? status_.fixed_shutter
- : exposure_mode_->shutter[0];
- shutter_time = clipShutter(shutter_time);
- analogue_gain = status_.fixed_analogue_gain != 0.0
- ? status_.fixed_analogue_gain
- : exposure_mode_->gain[0];
- if (shutter_time * analogue_gain < exposure_value) {
- for (unsigned int stage = 1;
- stage < exposure_mode_->gain.size(); stage++) {
- if (!status_.fixed_shutter) {
- Duration stage_shutter =
- clipShutter(exposure_mode_->shutter[stage]);
- if (stage_shutter * analogue_gain >=
- exposure_value) {
- shutter_time =
- exposure_value / analogue_gain;
- break;
- }
- shutter_time = stage_shutter;
- }
- if (status_.fixed_analogue_gain == 0.0) {
- if (exposure_mode_->gain[stage] *
- shutter_time >=
- exposure_value) {
- analogue_gain =
- exposure_value / shutter_time;
- break;
- }
- analogue_gain = exposure_mode_->gain[stage];
- }
- }
- }
- LOG(RPiAgc, Debug) << "Divided up shutter and gain are " << shutter_time << " and "
- << analogue_gain;
- // Finally adjust shutter time for flicker avoidance (require both
- // shutter and gain not to be fixed).
- if (!status_.fixed_shutter && !status_.fixed_analogue_gain &&
- status_.flicker_period) {
- int flicker_periods = shutter_time / status_.flicker_period;
- if (flicker_periods) {
- Duration new_shutter_time = flicker_periods * status_.flicker_period;
- analogue_gain *= shutter_time / new_shutter_time;
- // We should still not allow the ag to go over the
- // largest value in the exposure mode. Note that this
- // may force more of the total exposure into the digital
- // gain as a side-effect.
- analogue_gain = std::min(analogue_gain,
- exposure_mode_->gain.back());
- shutter_time = new_shutter_time;
- }
- LOG(RPiAgc, Debug) << "After flicker avoidance, shutter "
- << shutter_time << " gain " << analogue_gain;
- }
- filtered_.shutter = shutter_time;
- filtered_.analogue_gain = analogue_gain;
-}
-
-void Agc::writeAndFinish(Metadata *image_metadata, bool desaturate)
-{
- status_.total_exposure_value = filtered_.total_exposure;
- status_.target_exposure_value = desaturate ? 0s : target_.total_exposure_no_dg;
- status_.shutter_time = filtered_.shutter;
- status_.analogue_gain = filtered_.analogue_gain;
- // Write to metadata as well, in case anyone wants to update the camera
- // immediately.
- image_metadata->Set("agc.status", status_);
- LOG(RPiAgc, Debug) << "Output written, total exposure requested is "
- << filtered_.total_exposure;
- LOG(RPiAgc, Debug) << "Camera exposure update: shutter time " << filtered_.shutter
- << " analogue gain " << filtered_.analogue_gain;
-}
-
-Duration Agc::clipShutter(Duration shutter)
-{
- if (max_shutter_)
- shutter = std::min(shutter, max_shutter_);
- return shutter;
-}
-
-// Register algorithm with the system.
-static Algorithm *Create(Controller *controller)
-{
- return (Algorithm *)new Agc(controller);
-}
-static RegisterAlgorithm reg(NAME, &Create);
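
The filtering step used by Process() above amounts to a first-order IIR filter on the exposure values. A simplified standalone model of what Agc::filterExposure() does, with the special cases called out; the function name and parameters here are illustrative:

    #include <cmath>

    // Move the filtered exposure a fraction "speed" of the way towards the
    // target each frame; converge instantly during startup or when both
    // shutter and gain are fixed, and go faster when already close.
    double filterTowards(double filtered, double target, double speed,
                         bool startupOrFixed)
    {
        if (startupOrFixed)
            speed = 1.0;
        else if (filtered > 0.8 * target && filtered < 1.2 * target)
            speed = std::sqrt(speed);
        return speed * target + filtered * (1.0 - speed);
    }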
diff --git a/src/ipa/raspberrypi/controller/rpi/agc.hpp b/src/ipa/raspberrypi/controller/rpi/agc.hpp
deleted file mode 100644
index c100d312..00000000
--- a/src/ipa/raspberrypi/controller/rpi/agc.hpp
+++ /dev/null
@@ -1,139 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * agc.hpp - AGC/AEC control algorithm
- */
-#pragma once
-
-#include <vector>
-#include <mutex>
-
-#include <libcamera/base/utils.h>
-
-#include "../agc_algorithm.hpp"
-#include "../agc_status.h"
-#include "../pwl.hpp"
-
-// This is our implementation of AGC.
-
-// This is the number actually set up by the firmware, not the maximum possible
-// number (which is 16).
-
-#define AGC_STATS_SIZE 15
-
-namespace RPiController {
-
-struct AgcMeteringMode {
- double weights[AGC_STATS_SIZE];
- void Read(boost::property_tree::ptree const &params);
-};
-
-struct AgcExposureMode {
- std::vector<libcamera::utils::Duration> shutter;
- std::vector<double> gain;
- void Read(boost::property_tree::ptree const &params);
-};
-
-struct AgcConstraint {
- enum class Bound { LOWER = 0, UPPER = 1 };
- Bound bound;
- double q_lo;
- double q_hi;
- Pwl Y_target;
- void Read(boost::property_tree::ptree const &params);
-};
-
-typedef std::vector<AgcConstraint> AgcConstraintMode;
-
-struct AgcConfig {
- void Read(boost::property_tree::ptree const &params);
- std::map<std::string, AgcMeteringMode> metering_modes;
- std::map<std::string, AgcExposureMode> exposure_modes;
- std::map<std::string, AgcConstraintMode> constraint_modes;
- Pwl Y_target;
- double speed;
- uint16_t startup_frames;
- unsigned int convergence_frames;
- double max_change;
- double min_change;
- double fast_reduce_threshold;
- double speed_up_threshold;
- std::string default_metering_mode;
- std::string default_exposure_mode;
- std::string default_constraint_mode;
- double base_ev;
- libcamera::utils::Duration default_exposure_time;
- double default_analogue_gain;
-};
-
-class Agc : public AgcAlgorithm
-{
-public:
- Agc(Controller *controller);
- char const *Name() const override;
- void Read(boost::property_tree::ptree const &params) override;
- // AGC handles "pausing" for itself.
- bool IsPaused() const override;
- void Pause() override;
- void Resume() override;
- unsigned int GetConvergenceFrames() const override;
- void SetEv(double ev) override;
- void SetFlickerPeriod(libcamera::utils::Duration flicker_period) override;
- void SetMaxShutter(libcamera::utils::Duration max_shutter) override;
- void SetFixedShutter(libcamera::utils::Duration fixed_shutter) override;
- void SetFixedAnalogueGain(double fixed_analogue_gain) override;
- void SetMeteringMode(std::string const &metering_mode_name) override;
- void SetExposureMode(std::string const &exposure_mode_name) override;
-	void SetConstraintMode(std::string const &constraint_mode_name) override;
- void SwitchMode(CameraMode const &camera_mode, Metadata *metadata) override;
- void Prepare(Metadata *image_metadata) override;
- void Process(StatisticsPtr &stats, Metadata *image_metadata) override;
-
-private:
- void updateLockStatus(DeviceStatus const &device_status);
- AgcConfig config_;
- void housekeepConfig();
- void fetchCurrentExposure(Metadata *image_metadata);
- void fetchAwbStatus(Metadata *image_metadata);
- void computeGain(bcm2835_isp_stats *statistics, Metadata *image_metadata,
- double &gain, double &target_Y);
- void computeTargetExposure(double gain);
- bool applyDigitalGain(double gain, double target_Y);
- void filterExposure(bool desaturate);
- void divideUpExposure();
- void writeAndFinish(Metadata *image_metadata, bool desaturate);
- libcamera::utils::Duration clipShutter(libcamera::utils::Duration shutter);
- AgcMeteringMode *metering_mode_;
- AgcExposureMode *exposure_mode_;
- AgcConstraintMode *constraint_mode_;
- uint64_t frame_count_;
- AwbStatus awb_;
- struct ExposureValues {
- ExposureValues();
-
- libcamera::utils::Duration shutter;
- double analogue_gain;
- libcamera::utils::Duration total_exposure;
- libcamera::utils::Duration total_exposure_no_dg; // without digital gain
- };
- ExposureValues current_; // values for the current frame
- ExposureValues target_; // calculate the values we want here
- ExposureValues filtered_; // these values are filtered towards target
- AgcStatus status_;
- int lock_count_;
- DeviceStatus last_device_status_;
- libcamera::utils::Duration last_target_exposure_;
- double last_sensitivity_; // sensitivity of the previous camera mode
-	// Below here are the "settings" that applications can change.
- std::string metering_mode_name_;
- std::string exposure_mode_name_;
- std::string constraint_mode_name_;
- double ev_;
- libcamera::utils::Duration flicker_period_;
- libcamera::utils::Duration max_shutter_;
- libcamera::utils::Duration fixed_shutter_;
- double fixed_analogue_gain_;
-};
-
-} // namespace RPiController
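
For context, the setter half of this interface is how application-level controls reach the algorithm. A hypothetical fragment of glue code; the metering-mode name comes from the tuning file and is illustrative:

    #include "agc.hpp"

    using namespace std::literals::chrono_literals;

    void applyControls(RPiController::Agc &agc)
    {
        agc.SetEv(1.5);                // EV expressed as a linear gain on the Y target
        agc.SetFlickerPeriod(10000us); // 100 Hz flicker from 50 Hz mains
        agc.SetMaxShutter(33333us);    // cap the shutter at roughly 1/30 s
        agc.SetMeteringMode("centre-weighted");
    }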
diff --git a/src/ipa/raspberrypi/controller/rpi/alsc.cpp b/src/ipa/raspberrypi/controller/rpi/alsc.cpp
deleted file mode 100644
index e575c14a..00000000
--- a/src/ipa/raspberrypi/controller/rpi/alsc.cpp
+++ /dev/null
@@ -1,787 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * alsc.cpp - ALSC (auto lens shading correction) control algorithm
- */
-
-#include <math.h>
-#include <numeric>
-
-#include <libcamera/base/log.h>
-#include <libcamera/base/span.h>
-
-#include "../awb_status.h"
-#include "alsc.hpp"
-
-// Raspberry Pi ALSC (Auto Lens Shading Correction) algorithm.
-
-using namespace RPiController;
-using namespace libcamera;
-
-LOG_DEFINE_CATEGORY(RPiAlsc)
-
-#define NAME "rpi.alsc"
-
-static const int X = ALSC_CELLS_X;
-static const int Y = ALSC_CELLS_Y;
-static const int XY = X * Y;
-static const double INSUFFICIENT_DATA = -1.0;
-
-Alsc::Alsc(Controller *controller)
- : Algorithm(controller)
-{
- async_abort_ = async_start_ = async_started_ = async_finished_ = false;
- async_thread_ = std::thread(std::bind(&Alsc::asyncFunc, this));
-}
-
-Alsc::~Alsc()
-{
- {
- std::lock_guard<std::mutex> lock(mutex_);
- async_abort_ = true;
- }
- async_signal_.notify_one();
- async_thread_.join();
-}
-
-char const *Alsc::Name() const
-{
- return NAME;
-}
-
-static void generate_lut(double *lut, boost::property_tree::ptree const &params)
-{
- double cstrength = params.get<double>("corner_strength", 2.0);
- if (cstrength <= 1.0)
- throw std::runtime_error("Alsc: corner_strength must be > 1.0");
- double asymmetry = params.get<double>("asymmetry", 1.0);
- if (asymmetry < 0)
- throw std::runtime_error("Alsc: asymmetry must be >= 0");
- double f1 = cstrength - 1, f2 = 1 + sqrt(cstrength);
- double R2 = X * Y / 4 * (1 + asymmetry * asymmetry);
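- // R2 normalises the squared distance from the table centre, with
- // asymmetry weighting the x distance relative to y.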
- int num = 0;
- for (int y = 0; y < Y; y++) {
- for (int x = 0; x < X; x++) {
- double dy = y - Y / 2 + 0.5,
- dx = (x - X / 2 + 0.5) * asymmetry;
- double r2 = (dx * dx + dy * dy) / R2;
- lut[num++] =
- (f1 * r2 + f2) * (f1 * r2 + f2) /
- (f2 * f2); // this reproduces the cos^4 rule
- }
- }
-}
-
-static void read_lut(double *lut, boost::property_tree::ptree const &params)
-{
- int num = 0;
- const int max_num = XY;
- for (auto &p : params) {
- if (num == max_num)
- throw std::runtime_error(
- "Alsc: too many entries in LSC table");
- lut[num++] = p.second.get_value<double>();
- }
- if (num < max_num)
- throw std::runtime_error("Alsc: too few entries in LSC table");
-}
-
-static void read_calibrations(std::vector<AlscCalibration> &calibrations,
- boost::property_tree::ptree const &params,
- std::string const &name)
-{
- if (params.get_child_optional(name)) {
- double last_ct = 0;
- for (auto &p : params.get_child(name)) {
- double ct = p.second.get<double>("ct");
- if (ct <= last_ct)
- throw std::runtime_error(
- "Alsc: entries in " + name +
- " must be in increasing ct order");
- AlscCalibration calibration;
- calibration.ct = last_ct = ct;
- boost::property_tree::ptree const &table =
- p.second.get_child("table");
- int num = 0;
- for (auto it = table.begin(); it != table.end(); it++) {
- if (num == XY)
- throw std::runtime_error(
- "Alsc: too many values for ct " +
- std::to_string(ct) + " in " +
- name);
- calibration.table[num++] =
- it->second.get_value<double>();
- }
- if (num != XY)
- throw std::runtime_error(
- "Alsc: too few values for ct " +
- std::to_string(ct) + " in " + name);
- calibrations.push_back(calibration);
- LOG(RPiAlsc, Debug)
- << "Read " << name << " calibration for ct " << ct;
- }
- }
-}
-
-void Alsc::Read(boost::property_tree::ptree const &params)
-{
- config_.frame_period = params.get<uint16_t>("frame_period", 12);
- config_.startup_frames = params.get<uint16_t>("startup_frames", 10);
- config_.speed = params.get<double>("speed", 0.05);
- double sigma = params.get<double>("sigma", 0.01);
- config_.sigma_Cr = params.get<double>("sigma_Cr", sigma);
- config_.sigma_Cb = params.get<double>("sigma_Cb", sigma);
- config_.min_count = params.get<double>("min_count", 10.0);
- config_.min_G = params.get<uint16_t>("min_G", 50);
- config_.omega = params.get<double>("omega", 1.3);
- config_.n_iter = params.get<uint32_t>("n_iter", X + Y);
- config_.luminance_strength =
- params.get<double>("luminance_strength", 1.0);
- for (int i = 0; i < XY; i++)
- config_.luminance_lut[i] = 1.0;
- if (params.get_child_optional("corner_strength"))
- generate_lut(config_.luminance_lut, params);
- else if (params.get_child_optional("luminance_lut"))
- read_lut(config_.luminance_lut,
- params.get_child("luminance_lut"));
- else
- LOG(RPiAlsc, Warning)
- << "no luminance table - assume unity everywhere";
- read_calibrations(config_.calibrations_Cr, params, "calibrations_Cr");
- read_calibrations(config_.calibrations_Cb, params, "calibrations_Cb");
- config_.default_ct = params.get<double>("default_ct", 4500.0);
- config_.threshold = params.get<double>("threshold", 1e-3);
- config_.lambda_bound = params.get<double>("lambda_bound", 0.05);
-}
-
-static double get_ct(Metadata *metadata, double default_ct);
-static void get_cal_table(double ct,
- std::vector<AlscCalibration> const &calibrations,
- double cal_table[XY]);
-static void resample_cal_table(double const cal_table_in[XY],
- CameraMode const &camera_mode,
- double cal_table_out[XY]);
-static void compensate_lambdas_for_cal(double const cal_table[XY],
- double const old_lambdas[XY],
- double new_lambdas[XY]);
-static void add_luminance_to_tables(double results[3][Y][X],
- double const lambda_r[XY], double lambda_g,
- double const lambda_b[XY],
- double const luminance_lut[XY],
- double luminance_strength);
-
-void Alsc::Initialise()
-{
- frame_count2_ = frame_count_ = frame_phase_ = 0;
- first_time_ = true;
- ct_ = config_.default_ct;
- // The lambdas are initialised in the SwitchMode.
-}
-
-void Alsc::waitForAsyncThread()
-{
- if (async_started_) {
- async_started_ = false;
- std::unique_lock<std::mutex> lock(mutex_);
- sync_signal_.wait(lock, [&] {
- return async_finished_;
- });
- async_finished_ = false;
- }
-}
-
-static bool compare_modes(CameraMode const &cm0, CameraMode const &cm1)
-{
- // Return true if the modes crop from the sensor significantly differently,
- // or if the user transform has changed.
- if (cm0.transform != cm1.transform)
- return true;
- int left_diff = abs(cm0.crop_x - cm1.crop_x);
- int top_diff = abs(cm0.crop_y - cm1.crop_y);
- int right_diff = fabs(cm0.crop_x + cm0.scale_x * cm0.width -
- cm1.crop_x - cm1.scale_x * cm1.width);
- int bottom_diff = fabs(cm0.crop_y + cm0.scale_y * cm0.height -
- cm1.crop_y - cm1.scale_y * cm1.height);
- // These thresholds are a rather arbitrary amount chosen to trigger
- // when carrying on with the previously calculated tables might be
- // worse than regenerating them (but without the adaptive algorithm).
- int threshold_x = cm0.sensor_width >> 4;
- int threshold_y = cm0.sensor_height >> 4;
- return left_diff > threshold_x || right_diff > threshold_x ||
- top_diff > threshold_y || bottom_diff > threshold_y;
-}
-
-void Alsc::SwitchMode(CameraMode const &camera_mode,
- [[maybe_unused]] Metadata *metadata)
-{
- // We're going to start over with the tables if there's any "significant"
- // change.
- bool reset_tables = first_time_ || compare_modes(camera_mode_, camera_mode);
-
- // Believe the colour temperature from the AWB, if there is one.
- ct_ = get_ct(metadata, ct_);
-
- // Ensure the other thread isn't running while we do this.
- waitForAsyncThread();
-
- camera_mode_ = camera_mode;
-
- // We must resample the luminance table like we do the others, but it's
- // fixed so we can simply do it up front here.
- resample_cal_table(config_.luminance_lut, camera_mode_, luminance_table_);
-
- if (reset_tables) {
- // Upon every "table reset", arrange for something sensible to be
- // generated. Construct the tables for the previously recorded colour
- // temperature. In order to start over from scratch we initialise
- // the lambdas, but the rest of this code then echoes the code in
- // doAlsc, without the adaptive algorithm.
- for (int i = 0; i < XY; i++)
- lambda_r_[i] = lambda_b_[i] = 1.0;
- double cal_table_r[XY], cal_table_b[XY], cal_table_tmp[XY];
- get_cal_table(ct_, config_.calibrations_Cr, cal_table_tmp);
- resample_cal_table(cal_table_tmp, camera_mode_, cal_table_r);
- get_cal_table(ct_, config_.calibrations_Cb, cal_table_tmp);
- resample_cal_table(cal_table_tmp, camera_mode_, cal_table_b);
- compensate_lambdas_for_cal(cal_table_r, lambda_r_,
- async_lambda_r_);
- compensate_lambdas_for_cal(cal_table_b, lambda_b_,
- async_lambda_b_);
- add_luminance_to_tables(sync_results_, async_lambda_r_, 1.0,
- async_lambda_b_, luminance_table_,
- config_.luminance_strength);
- memcpy(prev_sync_results_, sync_results_,
- sizeof(prev_sync_results_));
- frame_phase_ = config_.frame_period; // run the algo again asap
- first_time_ = false;
- }
-}
-
-void Alsc::fetchAsyncResults()
-{
- LOG(RPiAlsc, Debug) << "Fetch ALSC results";
- async_finished_ = false;
- async_started_ = false;
- memcpy(sync_results_, async_results_, sizeof(sync_results_));
-}
-
-double get_ct(Metadata *metadata, double default_ct)
-{
- AwbStatus awb_status;
- awb_status.temperature_K = default_ct; // in case nothing found
- if (metadata->Get("awb.status", awb_status) != 0)
- LOG(RPiAlsc, Debug) << "no AWB results found, using "
- << awb_status.temperature_K;
- else
- LOG(RPiAlsc, Debug) << "AWB results found, using "
- << awb_status.temperature_K;
- return awb_status.temperature_K;
-}
-
-static void copy_stats(bcm2835_isp_stats_region regions[XY], StatisticsPtr &stats,
- AlscStatus const &status)
-{
- bcm2835_isp_stats_region *input_regions = stats->awb_stats;
- double *r_table = (double *)status.r;
- double *g_table = (double *)status.g;
- double *b_table = (double *)status.b;
- for (int i = 0; i < XY; i++) {
- regions[i].r_sum = input_regions[i].r_sum / r_table[i];
- regions[i].g_sum = input_regions[i].g_sum / g_table[i];
- regions[i].b_sum = input_regions[i].b_sum / b_table[i];
- regions[i].counted = input_regions[i].counted;
- // (don't care about the uncounted value)
- }
-}
-
-void Alsc::restartAsync(StatisticsPtr &stats, Metadata *image_metadata)
-{
- LOG(RPiAlsc, Debug) << "Starting ALSC calculation";
- // Get the current colour temperature. It's all we need from the
- // metadata. Default to the last CT value (which could be the default).
- ct_ = get_ct(image_metadata, ct_);
- // We have to copy the statistics here, dividing out our best guess of
- // the LSC table that the pipeline applied to them.
- AlscStatus alsc_status;
- if (image_metadata->Get("alsc.status", alsc_status) != 0) {
- LOG(RPiAlsc, Warning)
- << "No ALSC status found for applied gains!";
- for (int y = 0; y < Y; y++)
- for (int x = 0; x < X; x++) {
- alsc_status.r[y][x] = 1.0;
- alsc_status.g[y][x] = 1.0;
- alsc_status.b[y][x] = 1.0;
- }
- }
- copy_stats(statistics_, stats, alsc_status);
- frame_phase_ = 0;
- async_started_ = true;
- {
- std::lock_guard<std::mutex> lock(mutex_);
- async_start_ = true;
- }
- async_signal_.notify_one();
-}
-
-void Alsc::Prepare(Metadata *image_metadata)
-{
- // Count frames since we started, and since we last poked the async
- // thread.
- if (frame_count_ < (int)config_.startup_frames)
- frame_count_++;
- double speed = frame_count_ < (int)config_.startup_frames
- ? 1.0
- : config_.speed;
- LOG(RPiAlsc, Debug)
- << "frame_count " << frame_count_ << " speed " << speed;
- {
- std::unique_lock<std::mutex> lock(mutex_);
- if (async_started_ && async_finished_)
- fetchAsyncResults();
- }
- // Apply IIR filter to results and program into the pipeline.
- double *ptr = (double *)sync_results_,
- *pptr = (double *)prev_sync_results_;
- for (unsigned int i = 0;
- i < sizeof(sync_results_) / sizeof(double); i++)
- pptr[i] = speed * ptr[i] + (1.0 - speed) * pptr[i];
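- // (prev_sync_results_ thus converges towards sync_results_ at the
- // configured speed.)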
- // Put output values into status metadata.
- AlscStatus status;
- memcpy(status.r, prev_sync_results_[0], sizeof(status.r));
- memcpy(status.g, prev_sync_results_[1], sizeof(status.g));
- memcpy(status.b, prev_sync_results_[2], sizeof(status.b));
- image_metadata->Set("alsc.status", status);
-}
-
-void Alsc::Process(StatisticsPtr &stats, Metadata *image_metadata)
-{
- // Count frames since we started, and since we last poked the async
- // thread.
- if (frame_phase_ < (int)config_.frame_period)
- frame_phase_++;
- if (frame_count2_ < (int)config_.startup_frames)
- frame_count2_++;
- LOG(RPiAlsc, Debug) << "frame_phase " << frame_phase_;
- if (frame_phase_ >= (int)config_.frame_period ||
- frame_count2_ < (int)config_.startup_frames) {
- if (async_started_ == false)
- restartAsync(stats, image_metadata);
- }
-}
-
-void Alsc::asyncFunc()
-{
- while (true) {
- {
- std::unique_lock<std::mutex> lock(mutex_);
- async_signal_.wait(lock, [&] {
- return async_start_ || async_abort_;
- });
- async_start_ = false;
- if (async_abort_)
- break;
- }
- doAlsc();
- {
- std::lock_guard<std::mutex> lock(mutex_);
- async_finished_ = true;
- }
- sync_signal_.notify_one();
- }
-}
-
-void get_cal_table(double ct, std::vector<AlscCalibration> const &calibrations,
- double cal_table[XY])
-{
- if (calibrations.empty()) {
- for (int i = 0; i < XY; i++)
- cal_table[i] = 1.0;
- LOG(RPiAlsc, Debug) << "no calibrations found";
- } else if (ct <= calibrations.front().ct) {
- memcpy(cal_table, calibrations.front().table,
- XY * sizeof(double));
- LOG(RPiAlsc, Debug) << "using calibration for "
- << calibrations.front().ct;
- } else if (ct >= calibrations.back().ct) {
- memcpy(cal_table, calibrations.back().table,
- XY * sizeof(double));
- LOG(RPiAlsc, Debug) << "using calibration for "
- << calibrations.back().ct;
- } else {
- int idx = 0;
- while (ct > calibrations[idx + 1].ct)
- idx++;
- double ct0 = calibrations[idx].ct,
- ct1 = calibrations[idx + 1].ct;
- LOG(RPiAlsc, Debug)
- << "ct is " << ct << ", interpolating between "
- << ct0 << " and " << ct1;
- for (int i = 0; i < XY; i++)
- cal_table[i] =
- (calibrations[idx].table[i] * (ct1 - ct) +
- calibrations[idx + 1].table[i] * (ct - ct0)) /
- (ct1 - ct0);
- }
-}
-
-void resample_cal_table(double const cal_table_in[XY],
- CameraMode const &camera_mode, double cal_table_out[XY])
-{
- // Precalculate and cache the x sampling locations and phases to save
- // recomputing them on every row.
- int x_lo[X], x_hi[X];
- double xf[X];
- double scale_x = camera_mode.sensor_width /
- (camera_mode.width * camera_mode.scale_x);
- double x_off = camera_mode.crop_x / (double)camera_mode.sensor_width;
- double x = .5 / scale_x + x_off * X - .5;
- double x_inc = 1 / scale_x;
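- // For each output column, x_lo/x_hi bracket the two input columns to
- // blend, xf gives the blend fraction, and any horizontal flip from the
- // camera mode is folded into the indices.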
- for (int i = 0; i < X; i++, x += x_inc) {
- x_lo[i] = floor(x);
- xf[i] = x - x_lo[i];
- x_hi[i] = std::min(x_lo[i] + 1, X - 1);
- x_lo[i] = std::max(x_lo[i], 0);
- if (!!(camera_mode.transform & libcamera::Transform::HFlip)) {
- x_lo[i] = X - 1 - x_lo[i];
- x_hi[i] = X - 1 - x_hi[i];
- }
- }
- // Now march over the output table generating the new values.
- double scale_y = camera_mode.sensor_height /
- (camera_mode.height * camera_mode.scale_y);
- double y_off = camera_mode.crop_y / (double)camera_mode.sensor_height;
- double y = .5 / scale_y + y_off * Y - .5;
- double y_inc = 1 / scale_y;
- for (int j = 0; j < Y; j++, y += y_inc) {
- int y_lo = floor(y);
- double yf = y - y_lo;
- int y_hi = std::min(y_lo + 1, Y - 1);
- y_lo = std::max(y_lo, 0);
- if (!!(camera_mode.transform & libcamera::Transform::VFlip)) {
- y_lo = Y - 1 - y_lo;
- y_hi = Y - 1 - y_hi;
- }
- double const *row_above = cal_table_in + X * y_lo;
- double const *row_below = cal_table_in + X * y_hi;
- for (int i = 0; i < X; i++) {
- double above = row_above[x_lo[i]] * (1 - xf[i]) +
- row_above[x_hi[i]] * xf[i];
- double below = row_below[x_lo[i]] * (1 - xf[i]) +
- row_below[x_hi[i]] * xf[i];
- *(cal_table_out++) = above * (1 - yf) + below * yf;
- }
- }
-}
-
-// Calculate chrominance statistics (R/G and B/G) for each region.
-static_assert(XY == AWB_REGIONS, "ALSC/AWB statistics region mismatch");
-static void calculate_Cr_Cb(bcm2835_isp_stats_region *awb_region, double Cr[XY],
- double Cb[XY], uint32_t min_count, uint16_t min_G)
-{
- for (int i = 0; i < XY; i++) {
- bcm2835_isp_stats_region &zone = awb_region[i];
- if (zone.counted <= min_count ||
- zone.g_sum / zone.counted <= min_G) {
- Cr[i] = Cb[i] = INSUFFICIENT_DATA;
- continue;
- }
- Cr[i] = zone.r_sum / (double)zone.g_sum;
- Cb[i] = zone.b_sum / (double)zone.g_sum;
- }
-}
-
-static void apply_cal_table(double const cal_table[XY], double C[XY])
-{
- for (int i = 0; i < XY; i++)
- if (C[i] != INSUFFICIENT_DATA)
- C[i] *= cal_table[i];
-}
-
-void compensate_lambdas_for_cal(double const cal_table[XY],
- double const old_lambdas[XY],
- double new_lambdas[XY])
-{
- double min_new_lambda = std::numeric_limits<double>::max();
- for (int i = 0; i < XY; i++) {
- new_lambdas[i] = old_lambdas[i] * cal_table[i];
- min_new_lambda = std::min(min_new_lambda, new_lambdas[i]);
- }
- for (int i = 0; i < XY; i++)
- new_lambdas[i] /= min_new_lambda;
-}
-
-[[maybe_unused]] static void print_cal_table(double const C[XY])
-{
- printf("table: [\n");
- for (int j = 0; j < Y; j++) {
- for (int i = 0; i < X; i++) {
- printf("%5.3f", 1.0 / C[j * X + i]);
- if (i != X - 1 || j != Y - 1)
- printf(",");
- }
- printf("\n");
- }
- printf("]\n");
-}
-
-// Compute weight out of 1.0 which reflects how similar we wish to make the
-// colours of these two regions.
-static double compute_weight(double C_i, double C_j, double sigma)
-{
- if (C_i == INSUFFICIENT_DATA || C_j == INSUFFICIENT_DATA)
- return 0;
- double diff = (C_i - C_j) / sigma;
- return exp(-diff * diff / 2);
-}
-
-// Compute all weights.
-static void compute_W(double const C[XY], double sigma, double W[XY][4])
-{
- for (int i = 0; i < XY; i++) {
- // Start with neighbour above and go clockwise.
- W[i][0] = i >= X ? compute_weight(C[i], C[i - X], sigma) : 0;
- W[i][1] = i % X < X - 1 ? compute_weight(C[i], C[i + 1], sigma)
- : 0;
- W[i][2] =
- i < XY - X ? compute_weight(C[i], C[i + X], sigma) : 0;
- W[i][3] = i % X ? compute_weight(C[i], C[i - 1], sigma) : 0;
- }
-}
-
-// Compute M, the large but sparse matrix such that M * lambdas = 0.
-static void construct_M(double const C[XY], double const W[XY][4],
- double M[XY][4])
-{
- double epsilon = 0.001;
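- // epsilon effectively ties each cell weakly to the average of its
- // neighbours, keeping the system well behaved in cells with little or
- // no data.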
- for (int i = 0; i < XY; i++) {
- // Note how, if C[i] == INSUFFICIENT_DATA, the weights will all
- // be zero so the equation is still set up correctly.
- int m = !!(i >= X) + !!(i % X < X - 1) + !!(i < XY - X) +
- !!(i % X); // total number of neighbours
- // we'll divide the diagonal out straight away
- double diagonal =
- (epsilon + W[i][0] + W[i][1] + W[i][2] + W[i][3]) *
- C[i];
- M[i][0] = i >= X ? (W[i][0] * C[i - X] + epsilon / m * C[i]) /
- diagonal
- : 0;
- M[i][1] = i % X < X - 1
- ? (W[i][1] * C[i + 1] + epsilon / m * C[i]) /
- diagonal
- : 0;
- M[i][2] = i < XY - X
- ? (W[i][2] * C[i + X] + epsilon / m * C[i]) /
- diagonal
- : 0;
- M[i][3] = i % X ? (W[i][3] * C[i - 1] + epsilon / m * C[i]) /
- diagonal
- : 0;
- }
-}
-
-// In the compute_lambda_ functions, note that the matrix coefficients for the
-// left/right neighbours are zero down the left/right edges, so we don't need
-// to test the i value to exclude them.
-static double compute_lambda_bottom(int i, double const M[XY][4],
- double lambda[XY])
-{
- return M[i][1] * lambda[i + 1] + M[i][2] * lambda[i + X] +
- M[i][3] * lambda[i - 1];
-}
-static double compute_lambda_bottom_start(int i, double const M[XY][4],
- double lambda[XY])
-{
- return M[i][1] * lambda[i + 1] + M[i][2] * lambda[i + X];
-}
-static double compute_lambda_interior(int i, double const M[XY][4],
- double lambda[XY])
-{
- return M[i][0] * lambda[i - X] + M[i][1] * lambda[i + 1] +
- M[i][2] * lambda[i + X] + M[i][3] * lambda[i - 1];
-}
-static double compute_lambda_top(int i, double const M[XY][4],
- double lambda[XY])
-{
- return M[i][0] * lambda[i - X] + M[i][1] * lambda[i + 1] +
- M[i][3] * lambda[i - 1];
-}
-static double compute_lambda_top_end(int i, double const M[XY][4],
- double lambda[XY])
-{
- return M[i][0] * lambda[i - X] + M[i][3] * lambda[i - 1];
-}
-
-// Gauss-Seidel iteration with over-relaxation.
-static double gauss_seidel2_SOR(double const M[XY][4], double omega,
- double lambda[XY], double lambda_bound)
-{
- const double min = 1 - lambda_bound, max = 1 + lambda_bound;
- double old_lambda[XY];
- int i;
- for (i = 0; i < XY; i++)
- old_lambda[i] = lambda[i];
- lambda[0] = compute_lambda_bottom_start(0, M, lambda);
- lambda[0] = std::clamp(lambda[0], min, max);
- for (i = 1; i < X; i++) {
- lambda[i] = compute_lambda_bottom(i, M, lambda);
- lambda[i] = std::clamp(lambda[i], min, max);
- }
- for (; i < XY - X; i++) {
- lambda[i] = compute_lambda_interior(i, M, lambda);
- lambda[i] = std::clamp(lambda[i], min, max);
- }
- for (; i < XY - 1; i++) {
- lambda[i] = compute_lambda_top(i, M, lambda);
- lambda[i] = std::clamp(lambda[i], min, max);
- }
- lambda[i] = compute_lambda_top_end(i, M, lambda);
- lambda[i] = std::clamp(lambda[i], min, max);
- // Also solve the system from bottom to top, to help spread the updates
- // better.
- lambda[i] = compute_lambda_top_end(i, M, lambda);
- lambda[i] = std::clamp(lambda[i], min, max);
- for (i = XY - 2; i >= XY - X; i--) {
- lambda[i] = compute_lambda_top(i, M, lambda);
- lambda[i] = std::clamp(lambda[i], min, max);
- }
- for (; i >= X; i--) {
- lambda[i] = compute_lambda_interior(i, M, lambda);
- lambda[i] = std::clamp(lambda[i], min, max);
- }
- for (; i >= 1; i--) {
- lambda[i] = compute_lambda_bottom(i, M, lambda);
- lambda[i] = std::clamp(lambda[i], min, max);
- }
- lambda[0] = compute_lambda_bottom_start(0, M, lambda);
- lambda[0] = std::clamp(lambda[0], min, max);
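- // Scale each update by omega (over-relaxing when omega > 1) and report
- // the largest change so the caller can test for convergence.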
- double max_diff = 0;
- for (i = 0; i < XY; i++) {
- lambda[i] = old_lambda[i] + (lambda[i] - old_lambda[i]) * omega;
- if (fabs(lambda[i] - old_lambda[i]) > fabs(max_diff))
- max_diff = lambda[i] - old_lambda[i];
- }
- return max_diff;
-}
-
-// Normalise the values so that the smallest value is 1.
-static void normalise(double *ptr, size_t n)
-{
- double minval = ptr[0];
- for (size_t i = 1; i < n; i++)
- minval = std::min(minval, ptr[i]);
- for (size_t i = 0; i < n; i++)
- ptr[i] /= minval;
-}
-
-// Rescale the values so that the average value is 1.
-static void reaverage(Span<double> data)
-{
- double sum = std::accumulate(data.begin(), data.end(), 0.0);
- double ratio = 1 / (sum / data.size());
- for (double &d : data)
- d *= ratio;
-}
-
-static void run_matrix_iterations(double const C[XY], double lambda[XY],
- double const W[XY][4], double omega,
- int n_iter, double threshold, double lambda_bound)
-{
- double M[XY][4];
- construct_M(C, W, M);
- double last_max_diff = std::numeric_limits<double>::max();
- for (int i = 0; i < n_iter; i++) {
- double max_diff = fabs(gauss_seidel2_SOR(M, omega, lambda, lambda_bound));
- if (max_diff < threshold) {
- LOG(RPiAlsc, Debug)
- << "Stop after " << i + 1 << " iterations";
- break;
- }
- // this happens very occasionally (so make a note), though
- // doesn't seem to matter
- if (max_diff > last_max_diff)
- LOG(RPiAlsc, Debug)
- << "Iteration " << i << ": max_diff gone up "
- << last_max_diff << " to " << max_diff;
- last_max_diff = max_diff;
- }
- // We're going to normalise the lambdas so the total average is 1.
- reaverage({ lambda, XY });
-}
-
-static void add_luminance_rb(double result[XY], double const lambda[XY],
- double const luminance_lut[XY],
- double luminance_strength)
-{
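- // luminance_strength blends the luminance LUT between unity (0.0) and
- // its full value (1.0) before it is folded into the colour gains.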
- for (int i = 0; i < XY; i++)
- result[i] = lambda[i] *
- ((luminance_lut[i] - 1) * luminance_strength + 1);
-}
-
-static void add_luminance_g(double result[XY], double lambda,
- double const luminance_lut[XY],
- double luminance_strength)
-{
- for (int i = 0; i < XY; i++)
- result[i] = lambda *
- ((luminance_lut[i] - 1) * luminance_strength + 1);
-}
-
-void add_luminance_to_tables(double results[3][Y][X], double const lambda_r[XY],
- double lambda_g, double const lambda_b[XY],
- double const luminance_lut[XY],
- double luminance_strength)
-{
- add_luminance_rb((double *)results[0], lambda_r, luminance_lut,
- luminance_strength);
- add_luminance_g((double *)results[1], lambda_g, luminance_lut,
- luminance_strength);
- add_luminance_rb((double *)results[2], lambda_b, luminance_lut,
- luminance_strength);
- normalise((double *)results, 3 * XY);
-}
-
-void Alsc::doAlsc()
-{
- double Cr[XY], Cb[XY], Wr[XY][4], Wb[XY][4], cal_table_r[XY],
- cal_table_b[XY], cal_table_tmp[XY];
- // Calculate our R/B ("Cr"/"Cb") colour statistics, and assess which are
- // usable.
- calculate_Cr_Cb(statistics_, Cr, Cb, config_.min_count, config_.min_G);
- // Fetch the new calibrations (if any) for this CT. Resample them in
- // case the camera mode is not full-frame.
- get_cal_table(ct_, config_.calibrations_Cr, cal_table_tmp);
- resample_cal_table(cal_table_tmp, camera_mode_, cal_table_r);
- get_cal_table(ct_, config_.calibrations_Cb, cal_table_tmp);
- resample_cal_table(cal_table_tmp, camera_mode_, cal_table_b);
- // You could print out the cal tables for this image here, if you're
- // tuning the algorithm...
- // Apply any calibration to the statistics, so the adaptive algorithm
- // makes only the extra adjustments.
- apply_cal_table(cal_table_r, Cr);
- apply_cal_table(cal_table_b, Cb);
- // Compute weights between zones.
- compute_W(Cr, config_.sigma_Cr, Wr);
- compute_W(Cb, config_.sigma_Cb, Wb);
- // Run Gauss-Seidel iterations over the resulting matrix, for R and B.
- run_matrix_iterations(Cr, lambda_r_, Wr, config_.omega, config_.n_iter,
- config_.threshold, config_.lambda_bound);
- run_matrix_iterations(Cb, lambda_b_, Wb, config_.omega, config_.n_iter,
- config_.threshold, config_.lambda_bound);
- // Fold the calibrated gains into our final lambda values. (Note that on
- // the next run, we re-start with the lambda values that don't have the
- // calibration gains included.)
- compensate_lambdas_for_cal(cal_table_r, lambda_r_, async_lambda_r_);
- compensate_lambdas_for_cal(cal_table_b, lambda_b_, async_lambda_b_);
- // Fold in the luminance table at the appropriate strength.
- add_luminance_to_tables(async_results_, async_lambda_r_, 1.0,
- async_lambda_b_, luminance_table_,
- config_.luminance_strength);
-}
-
-// Register algorithm with the system.
-static Algorithm *Create(Controller *controller)
-{
- return (Algorithm *)new Alsc(controller);
-}
-static RegisterAlgorithm reg(NAME, &Create);
diff --git a/src/ipa/raspberrypi/controller/rpi/alsc.hpp b/src/ipa/raspberrypi/controller/rpi/alsc.hpp
deleted file mode 100644
index d1dbe0d1..00000000
--- a/src/ipa/raspberrypi/controller/rpi/alsc.hpp
+++ /dev/null
@@ -1,106 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * alsc.hpp - ALSC (auto lens shading correction) control algorithm
- */
-#pragma once
-
-#include <mutex>
-#include <condition_variable>
-#include <thread>
-
-#include "../algorithm.hpp"
-#include "../alsc_status.h"
-
-namespace RPiController {
-
-// Algorithm to generate automagic LSC (Lens Shading Correction) tables.
-
-struct AlscCalibration {
- double ct;
- double table[ALSC_CELLS_X * ALSC_CELLS_Y];
-};
-
-struct AlscConfig {
- // Only repeat the ALSC calculation every "this many" frames
- uint16_t frame_period;
- // number of initial frames for which speed is taken as 1.0 (maximum)
- uint16_t startup_frames;
- // IIR filter speed applied to algorithm results
- double speed;
- double sigma_Cr;
- double sigma_Cb;
- double min_count;
- uint16_t min_G;
- double omega;
- uint32_t n_iter;
- double luminance_lut[ALSC_CELLS_X * ALSC_CELLS_Y];
- double luminance_strength;
- std::vector<AlscCalibration> calibrations_Cr;
- std::vector<AlscCalibration> calibrations_Cb;
- double default_ct; // colour temperature if no metadata found
- double threshold; // iteration termination threshold
- double lambda_bound; // upper/lower bound for lambda from a value of 1
-};
-
-class Alsc : public Algorithm
-{
-public:
- Alsc(Controller *controller = NULL);
- ~Alsc();
- char const *Name() const override;
- void Initialise() override;
- void SwitchMode(CameraMode const &camera_mode, Metadata *metadata) override;
- void Read(boost::property_tree::ptree const &params) override;
- void Prepare(Metadata *image_metadata) override;
- void Process(StatisticsPtr &stats, Metadata *image_metadata) override;
-
-private:
- // configuration is read-only, and available to both threads
- AlscConfig config_;
- bool first_time_;
- CameraMode camera_mode_;
- double luminance_table_[ALSC_CELLS_X * ALSC_CELLS_Y];
- std::thread async_thread_;
- void asyncFunc(); // asynchronous thread function
- std::mutex mutex_;
- // condvar for async thread to wait on
- std::condition_variable async_signal_;
- // condvar for synchronous thread to wait on
- std::condition_variable sync_signal_;
- // for sync thread to check if async thread finished (requires mutex)
- bool async_finished_;
- // for async thread to check if it's been told to run (requires mutex)
- bool async_start_;
- // for async thread to check if it's been told to quit (requires mutex)
- bool async_abort_;
-
- // The following are only for the synchronous thread to use:
- // for sync thread to note it has asked the async thread to run
- bool async_started_;
- // counts up to frame_period before restarting the async thread
- int frame_phase_;
- // counts up to startup_frames
- int frame_count_;
- // counts up to startup_frames for Process function
- int frame_count2_;
- double sync_results_[3][ALSC_CELLS_Y][ALSC_CELLS_X];
- double prev_sync_results_[3][ALSC_CELLS_Y][ALSC_CELLS_X];
- void waitForAsyncThread();
- // The following are for the asynchronous thread to use, though the main
- // thread can set/reset them if the async thread is known to be idle:
- void restartAsync(StatisticsPtr &stats, Metadata *image_metadata);
- // copy out the results from the async thread so that it can be restarted
- void fetchAsyncResults();
- double ct_;
- bcm2835_isp_stats_region statistics_[ALSC_CELLS_Y * ALSC_CELLS_X];
- double async_results_[3][ALSC_CELLS_Y][ALSC_CELLS_X];
- double async_lambda_r_[ALSC_CELLS_X * ALSC_CELLS_Y];
- double async_lambda_b_[ALSC_CELLS_X * ALSC_CELLS_Y];
- void doAlsc();
- double lambda_r_[ALSC_CELLS_X * ALSC_CELLS_Y];
- double lambda_b_[ALSC_CELLS_X * ALSC_CELLS_Y];
-};
-
-} // namespace RPiController
diff --git a/src/ipa/raspberrypi/controller/rpi/awb.cpp b/src/ipa/raspberrypi/controller/rpi/awb.cpp
deleted file mode 100644
index d4c93447..00000000
--- a/src/ipa/raspberrypi/controller/rpi/awb.cpp
+++ /dev/null
@@ -1,667 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * awb.cpp - AWB control algorithm
- */
-
-#include <libcamera/base/log.h>
-
-#include "../lux_status.h"
-
-#include "awb.hpp"
-
-using namespace RPiController;
-using namespace libcamera;
-
-LOG_DEFINE_CATEGORY(RPiAwb)
-
-#define NAME "rpi.awb"
-
-#define AWB_STATS_SIZE_X DEFAULT_AWB_REGIONS_X
-#define AWB_STATS_SIZE_Y DEFAULT_AWB_REGIONS_Y
-
-// todo - the locking in this algorithm needs some tidying up as has been done
-// elsewhere (ALSC and AGC).
-
-void AwbMode::Read(boost::property_tree::ptree const &params)
-{
- ct_lo = params.get<double>("lo");
- ct_hi = params.get<double>("hi");
-}
-
-void AwbPrior::Read(boost::property_tree::ptree const &params)
-{
- lux = params.get<double>("lux");
- prior.Read(params.get_child("prior"));
-}
-
-static void read_ct_curve(Pwl &ct_r, Pwl &ct_b,
- boost::property_tree::ptree const &params)
-{
- int num = 0;
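- // The curve is supplied as a flat list of (ct, r, b) triples, from
- // which we build the two piecewise linear functions.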
- for (auto it = params.begin(); it != params.end(); it++) {
- double ct = it->second.get_value<double>();
- assert(it == params.begin() || ct != ct_r.Domain().end);
- if (++it == params.end())
- throw std::runtime_error(
- "AwbConfig: incomplete CT curve entry");
- ct_r.Append(ct, it->second.get_value<double>());
- if (++it == params.end())
- throw std::runtime_error(
- "AwbConfig: incomplete CT curve entry");
- ct_b.Append(ct, it->second.get_value<double>());
- num++;
- }
- if (num < 2)
- throw std::runtime_error(
- "AwbConfig: insufficient points in CT curve");
-}
-
-void AwbConfig::Read(boost::property_tree::ptree const &params)
-{
- bayes = params.get<int>("bayes", 1);
- frame_period = params.get<uint16_t>("frame_period", 10);
- startup_frames = params.get<uint16_t>("startup_frames", 10);
- convergence_frames = params.get<unsigned int>("convergence_frames", 3);
- speed = params.get<double>("speed", 0.05);
- if (params.get_child_optional("ct_curve"))
- read_ct_curve(ct_r, ct_b, params.get_child("ct_curve"));
- if (params.get_child_optional("priors")) {
- for (auto &p : params.get_child("priors")) {
- AwbPrior prior;
- prior.Read(p.second);
- if (!priors.empty() && prior.lux <= priors.back().lux)
- throw std::runtime_error(
- "AwbConfig: Prior must be ordered in increasing lux value");
- priors.push_back(prior);
- }
- if (priors.empty())
- throw std::runtime_error(
- "AwbConfig: no AWB priors configured");
- }
- if (params.get_child_optional("modes")) {
- for (auto &p : params.get_child("modes")) {
- modes[p.first].Read(p.second);
- if (default_mode == nullptr)
- default_mode = &modes[p.first];
- }
- if (default_mode == nullptr)
- throw std::runtime_error(
- "AwbConfig: no AWB modes configured");
- }
- min_pixels = params.get<double>("min_pixels", 16.0);
- min_G = params.get<uint16_t>("min_G", 32);
- min_regions = params.get<uint32_t>("min_regions", 10);
- delta_limit = params.get<double>("delta_limit", 0.2);
- coarse_step = params.get<double>("coarse_step", 0.2);
- transverse_pos = params.get<double>("transverse_pos", 0.01);
- transverse_neg = params.get<double>("transverse_neg", 0.01);
- if (transverse_pos <= 0 || transverse_neg <= 0)
- throw std::runtime_error(
- "AwbConfig: transverse_pos/neg must be > 0");
- sensitivity_r = params.get<double>("sensitivity_r", 1.0);
- sensitivity_b = params.get<double>("sensitivity_b", 1.0);
- if (bayes) {
- if (ct_r.Empty() || ct_b.Empty() || priors.empty() ||
- default_mode == nullptr) {
- LOG(RPiAwb, Warning)
- << "Bayesian AWB mis-configured - switch to Grey method";
- bayes = false;
- }
- }
- fast = params.get<int>(
- "fast", bayes); // default to fast for Bayesian, otherwise slow
- whitepoint_r = params.get<double>("whitepoint_r", 0.0);
- whitepoint_b = params.get<double>("whitepoint_b", 0.0);
- if (bayes == false)
- sensitivity_r = sensitivity_b =
- 1.0; // nor do sensitivities make any sense
-}
-
-Awb::Awb(Controller *controller)
- : AwbAlgorithm(controller)
-{
- async_abort_ = async_start_ = async_started_ = async_finished_ = false;
- mode_ = nullptr;
- manual_r_ = manual_b_ = 0.0;
- first_switch_mode_ = true;
- async_thread_ = std::thread(std::bind(&Awb::asyncFunc, this));
-}
-
-Awb::~Awb()
-{
- {
- std::lock_guard<std::mutex> lock(mutex_);
- async_abort_ = true;
- }
- async_signal_.notify_one();
- async_thread_.join();
-}
-
-char const *Awb::Name() const
-{
- return NAME;
-}
-
-void Awb::Read(boost::property_tree::ptree const &params)
-{
- config_.Read(params);
-}
-
-void Awb::Initialise()
-{
- frame_count_ = frame_phase_ = 0;
- // Put something sane into the status that we are filtering towards,
- // just in case the first few frames don't have anything meaningful in
- // them.
- if (!config_.ct_r.Empty() && !config_.ct_b.Empty()) {
- sync_results_.temperature_K = config_.ct_r.Domain().Clip(4000);
- sync_results_.gain_r =
- 1.0 / config_.ct_r.Eval(sync_results_.temperature_K);
- sync_results_.gain_g = 1.0;
- sync_results_.gain_b =
- 1.0 / config_.ct_b.Eval(sync_results_.temperature_K);
- } else {
- // random values just to stop the world blowing up
- sync_results_.temperature_K = 4500;
- sync_results_.gain_r = sync_results_.gain_g =
- sync_results_.gain_b = 1.0;
- }
- prev_sync_results_ = sync_results_;
- async_results_ = sync_results_;
-}
-
-bool Awb::IsPaused() const
-{
- return false;
-}
-
-void Awb::Pause()
-{
- // "Pause" by fixing everything to the most recent values.
- manual_r_ = sync_results_.gain_r = prev_sync_results_.gain_r;
- manual_b_ = sync_results_.gain_b = prev_sync_results_.gain_b;
- sync_results_.gain_g = prev_sync_results_.gain_g;
- sync_results_.temperature_K = prev_sync_results_.temperature_K;
-}
-
-void Awb::Resume()
-{
- manual_r_ = 0.0;
- manual_b_ = 0.0;
-}
-
-unsigned int Awb::GetConvergenceFrames() const
-{
- // If not in auto mode, there is no convergence
- // to happen, so no need to drop any frames - return zero.
- if (!isAutoEnabled())
- return 0;
- else
- return config_.convergence_frames;
-}
-
-void Awb::SetMode(std::string const &mode_name)
-{
- mode_name_ = mode_name;
-}
-
-void Awb::SetManualGains(double manual_r, double manual_b)
-{
- // If any of these are 0.0, we switch back to auto.
- manual_r_ = manual_r;
- manual_b_ = manual_b;
- // If not in auto mode, set these values into the sync_results which
- // means that Prepare() will adopt them immediately.
- if (!isAutoEnabled()) {
- sync_results_.gain_r = prev_sync_results_.gain_r = manual_r_;
- sync_results_.gain_g = prev_sync_results_.gain_g = 1.0;
- sync_results_.gain_b = prev_sync_results_.gain_b = manual_b_;
- }
-}
-
-void Awb::SwitchMode([[maybe_unused]] CameraMode const &camera_mode,
- Metadata *metadata)
-{
- // On the first mode switch we'll have no meaningful colour
- // temperature, so try to dead reckon one if in manual mode.
- if (!isAutoEnabled() && first_switch_mode_ && config_.bayes) {
- Pwl ct_r_inverse = config_.ct_r.Inverse();
- Pwl ct_b_inverse = config_.ct_b.Inverse();
- double ct_r = ct_r_inverse.Eval(ct_r_inverse.Domain().Clip(1 / manual_r_));
- double ct_b = ct_b_inverse.Eval(ct_b_inverse.Domain().Clip(1 / manual_b_));
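- // Average the CT estimates implied by the manual r and b gains.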
- prev_sync_results_.temperature_K = (ct_r + ct_b) / 2;
- sync_results_.temperature_K = prev_sync_results_.temperature_K;
- }
- // Let other algorithms know the current white balance values.
- metadata->Set("awb.status", prev_sync_results_);
- first_switch_mode_ = false;
-}
-
-bool Awb::isAutoEnabled() const
-{
- return manual_r_ == 0.0 || manual_b_ == 0.0;
-}
-
-void Awb::fetchAsyncResults()
-{
- LOG(RPiAwb, Debug) << "Fetch AWB results";
- async_finished_ = false;
- async_started_ = false;
- // It's possible manual gains could be set even while the async
- // thread was running, so only copy the results if still in auto mode.
- if (isAutoEnabled())
- sync_results_ = async_results_;
-}
-
-void Awb::restartAsync(StatisticsPtr &stats, double lux)
-{
- LOG(RPiAwb, Debug) << "Starting AWB calculation";
- // this makes a new reference which belongs to the asynchronous thread
- statistics_ = stats;
- // store the mode as it could technically change
- auto m = config_.modes.find(mode_name_);
- mode_ = m != config_.modes.end()
- ? &m->second
- : (mode_ == nullptr ? config_.default_mode : mode_);
- lux_ = lux;
- frame_phase_ = 0;
- async_started_ = true;
- size_t len = mode_name_.copy(async_results_.mode,
- sizeof(async_results_.mode) - 1);
- async_results_.mode[len] = '\0';
- {
- std::lock_guard<std::mutex> lock(mutex_);
- async_start_ = true;
- }
- async_signal_.notify_one();
-}
-
-void Awb::Prepare(Metadata *image_metadata)
-{
- if (frame_count_ < (int)config_.startup_frames)
- frame_count_++;
- double speed = frame_count_ < (int)config_.startup_frames
- ? 1.0
- : config_.speed;
- LOG(RPiAwb, Debug)
- << "frame_count " << frame_count_ << " speed " << speed;
- {
- std::unique_lock<std::mutex> lock(mutex_);
- if (async_started_ && async_finished_)
- fetchAsyncResults();
- }
- // Finally apply IIR filter to results and put into metadata.
- memcpy(prev_sync_results_.mode, sync_results_.mode,
- sizeof(prev_sync_results_.mode));
- prev_sync_results_.temperature_K =
- speed * sync_results_.temperature_K +
- (1.0 - speed) * prev_sync_results_.temperature_K;
- prev_sync_results_.gain_r = speed * sync_results_.gain_r +
- (1.0 - speed) * prev_sync_results_.gain_r;
- prev_sync_results_.gain_g = speed * sync_results_.gain_g +
- (1.0 - speed) * prev_sync_results_.gain_g;
- prev_sync_results_.gain_b = speed * sync_results_.gain_b +
- (1.0 - speed) * prev_sync_results_.gain_b;
- image_metadata->Set("awb.status", prev_sync_results_);
- LOG(RPiAwb, Debug)
- << "Using AWB gains r " << prev_sync_results_.gain_r << " g "
- << prev_sync_results_.gain_g << " b "
- << prev_sync_results_.gain_b;
-}
-
-void Awb::Process(StatisticsPtr &stats, Metadata *image_metadata)
-{
- // Count frames since we last poked the async thread.
- if (frame_phase_ < (int)config_.frame_period)
- frame_phase_++;
- LOG(RPiAwb, Debug) << "frame_phase " << frame_phase_;
- // We do not restart the async thread if we're not in auto mode.
- if (isAutoEnabled() &&
- (frame_phase_ >= (int)config_.frame_period ||
- frame_count_ < (int)config_.startup_frames)) {
- // Update any settings and any image metadata that we need.
- struct LuxStatus lux_status = {};
- lux_status.lux = 400; // in case no metadata
- if (image_metadata->Get("lux.status", lux_status) != 0)
- LOG(RPiAwb, Debug) << "No lux metadata found";
- LOG(RPiAwb, Debug) << "Awb lux value is " << lux_status.lux;
-
- if (async_started_ == false)
- restartAsync(stats, lux_status.lux);
- }
-}
-
-void Awb::asyncFunc()
-{
- while (true) {
- {
- std::unique_lock<std::mutex> lock(mutex_);
- async_signal_.wait(lock, [&] {
- return async_start_ || async_abort_;
- });
- async_start_ = false;
- if (async_abort_)
- break;
- }
- doAwb();
- {
- std::lock_guard<std::mutex> lock(mutex_);
- async_finished_ = true;
- }
- sync_signal_.notify_one();
- }
-}
-
-static void generate_stats(std::vector<Awb::RGB> &zones,
- bcm2835_isp_stats_region *stats, double min_pixels,
- double min_G)
-{
- for (int i = 0; i < AWB_STATS_SIZE_X * AWB_STATS_SIZE_Y; i++) {
- Awb::RGB zone;
- double counted = stats[i].counted;
- if (counted >= min_pixels) {
- zone.G = stats[i].g_sum / counted;
- if (zone.G >= min_G) {
- zone.R = stats[i].r_sum / counted;
- zone.B = stats[i].b_sum / counted;
- zones.push_back(zone);
- }
- }
- }
-}
-
-void Awb::prepareStats()
-{
- zones_.clear();
- // LSC has already been applied to the stats in this pipeline, so stop
- // any LSC compensation. We also ignore config_.fast in this version.
- generate_stats(zones_, statistics_->awb_stats, config_.min_pixels,
- config_.min_G);
- // we're done with these; we may as well relinquish our hold on the
- // pointer.
- statistics_.reset();
- // apply sensitivities, so values appear to come from our "canonical"
- // sensor.
- for (auto &zone : zones_)
- zone.R *= config_.sensitivity_r,
- zone.B *= config_.sensitivity_b;
-}
-
-double Awb::computeDelta2Sum(double gain_r, double gain_b)
-{
- // Compute the sum of the squared colour error (non-greyness) as it
- // appears in the log likelihood equation.
- double delta2_sum = 0;
- for (auto &z : zones_) {
- double delta_r = gain_r * z.R - 1 - config_.whitepoint_r;
- double delta_b = gain_b * z.B - 1 - config_.whitepoint_b;
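- // A perfectly grey zone gives gain * C = 1, so the whitepoint offsets
- // let the target sit slightly off pure grey.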
- double delta2 = delta_r * delta_r + delta_b * delta_b;
- //LOG(RPiAwb, Debug) << "delta_r " << delta_r << " delta_b " << delta_b << " delta2 " << delta2;
- delta2 = std::min(delta2, config_.delta_limit);
- delta2_sum += delta2;
- }
- return delta2_sum;
-}
-
-Pwl Awb::interpolatePrior()
-{
- // Interpolate the prior log likelihood function for our current lux
- // value.
- if (lux_ <= config_.priors.front().lux)
- return config_.priors.front().prior;
- else if (lux_ >= config_.priors.back().lux)
- return config_.priors.back().prior;
- else {
- int idx = 0;
- // find which two we lie between
- while (config_.priors[idx + 1].lux < lux_)
- idx++;
- double lux0 = config_.priors[idx].lux,
- lux1 = config_.priors[idx + 1].lux;
- return Pwl::Combine(config_.priors[idx].prior,
- config_.priors[idx + 1].prior,
- [&](double /*x*/, double y0, double y1) {
- return y0 + (y1 - y0) *
- (lux_ - lux0) / (lux1 - lux0);
- });
- }
-}
-
-static double interpolate_quadratic(Pwl::Point const &A, Pwl::Point const &B,
- Pwl::Point const &C)
-{
- // Given 3 points on a curve, find the extremum of the function in that
- // interval by fitting a quadratic.
- const double eps = 1e-3;
- Pwl::Point CA = C - A, BA = B - A;
- double denominator = 2 * (BA.y * CA.x - CA.y * BA.x);
- if (abs(denominator) > eps) {
- double numerator = BA.y * CA.x * CA.x - CA.y * BA.x * BA.x;
- double result = numerator / denominator + A.x;
- return std::max(A.x, std::min(C.x, result));
- }
- // has degenerated to straight line segment
- return A.y < C.y - eps ? A.x : (C.y < A.y - eps ? C.x : B.x);
-}
-
-double Awb::coarseSearch(Pwl const &prior)
-{
- points_.clear(); // assume doesn't deallocate memory
- size_t best_point = 0;
- double t = mode_->ct_lo;
- int span_r = 0, span_b = 0;
- // Step down the CT curve evaluating log likelihood.
- while (true) {
- double r = config_.ct_r.Eval(t, &span_r);
- double b = config_.ct_b.Eval(t, &span_b);
- double gain_r = 1 / r, gain_b = 1 / b;
- double delta2_sum = computeDelta2Sum(gain_r, gain_b);
- double prior_log_likelihood =
- prior.Eval(prior.Domain().Clip(t));
- double final_log_likelihood = delta2_sum - prior_log_likelihood;
- LOG(RPiAwb, Debug)
- << "t: " << t << " gain_r " << gain_r << " gain_b "
- << gain_b << " delta2_sum " << delta2_sum
- << " prior " << prior_log_likelihood << " final "
- << final_log_likelihood;
- points_.push_back(Pwl::Point(t, final_log_likelihood));
- if (points_.back().y < points_[best_point].y)
- best_point = points_.size() - 1;
- if (t == mode_->ct_hi)
- break;
- // scale the step by the current t so that we take roughly even
- // steps along the r/b curve
- t = std::min(t + t / 10 * config_.coarse_step,
- mode_->ct_hi);
- }
- t = points_[best_point].x;
- LOG(RPiAwb, Debug) << "Coarse search found CT " << t;
- // We have the best point of the search, but refine it with a quadratic
- // interpolation around its neighbours.
- if (points_.size() > 2) {
- unsigned long bp = std::min(best_point, points_.size() - 2);
- best_point = std::max(1UL, bp);
- t = interpolate_quadratic(points_[best_point - 1],
- points_[best_point],
- points_[best_point + 1]);
- LOG(RPiAwb, Debug)
- << "After quadratic refinement, coarse search has CT "
- << t;
- }
- return t;
-}
-
-void Awb::fineSearch(double &t, double &r, double &b, Pwl const &prior)
-{
- int span_r = -1, span_b = -1;
- config_.ct_r.Eval(t, &span_r);
- config_.ct_b.Eval(t, &span_b);
- double step = t / 10 * config_.coarse_step * 0.1;
- int nsteps = 5;
- double r_diff = config_.ct_r.Eval(t + nsteps * step, &span_r) -
- config_.ct_r.Eval(t - nsteps * step, &span_r);
- double b_diff = config_.ct_b.Eval(t + nsteps * step, &span_b) -
- config_.ct_b.Eval(t - nsteps * step, &span_b);
- Pwl::Point transverse(b_diff, -r_diff);
- if (transverse.Len2() < 1e-6)
- return;
- // unit vector orthogonal to the b vs. r function (pointing outwards
- // with r and b increasing)
- transverse = transverse / transverse.Len();
- double best_log_likelihood = 0, best_t = 0, best_r = 0, best_b = 0;
- double transverse_range =
- config_.transverse_neg + config_.transverse_pos;
- const int MAX_NUM_DELTAS = 12;
- // a transverse step approximately every 0.01 r/b units
- int num_deltas = floor(transverse_range * 100 + 0.5) + 1;
- num_deltas = num_deltas < 3 ? 3 :
- (num_deltas > MAX_NUM_DELTAS ? MAX_NUM_DELTAS : num_deltas);
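- // (i.e. clamp to between 3 and MAX_NUM_DELTAS transverse samples)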
- // Step down CT curve. March a bit further if the transverse range is
- // large.
- nsteps += num_deltas;
- for (int i = -nsteps; i <= nsteps; i++) {
- double t_test = t + i * step;
- double prior_log_likelihood =
- prior.Eval(prior.Domain().Clip(t_test));
- double r_curve = config_.ct_r.Eval(t_test, &span_r);
- double b_curve = config_.ct_b.Eval(t_test, &span_b);
- // x will be distance off the curve, y the log likelihood there
- Pwl::Point points[MAX_NUM_DELTAS];
- int best_point = 0;
- // Take some measurements transversely *off* the CT curve.
- for (int j = 0; j < num_deltas; j++) {
- points[j].x = -config_.transverse_neg +
- (transverse_range * j) / (num_deltas - 1);
- Pwl::Point rb_test = Pwl::Point(r_curve, b_curve) +
- transverse * points[j].x;
- double r_test = rb_test.x, b_test = rb_test.y;
- double gain_r = 1 / r_test, gain_b = 1 / b_test;
- double delta2_sum = computeDelta2Sum(gain_r, gain_b);
- points[j].y = delta2_sum - prior_log_likelihood;
- LOG(RPiAwb, Debug)
- << "At t " << t_test << " r " << r_test << " b "
- << b_test << ": " << points[j].y;
- if (points[j].y < points[best_point].y)
- best_point = j;
- }
- // We have NUM_DELTAS points transversely across the CT curve,
- // now let's do a quadratic interpolation for the best result.
- best_point = std::max(1, std::min(best_point, num_deltas - 2));
- Pwl::Point rb_test =
- Pwl::Point(r_curve, b_curve) +
- transverse *
- interpolate_quadratic(points[best_point - 1],
- points[best_point],
- points[best_point + 1]);
- double r_test = rb_test.x, b_test = rb_test.y;
- double gain_r = 1 / r_test, gain_b = 1 / b_test;
- double delta2_sum = computeDelta2Sum(gain_r, gain_b);
- double final_log_likelihood = delta2_sum - prior_log_likelihood;
- LOG(RPiAwb, Debug)
- << "Finally "
- << t_test << " r " << r_test << " b " << b_test << ": "
- << final_log_likelihood
- << (final_log_likelihood < best_log_likelihood ? " BEST" : "");
- if (best_t == 0 || final_log_likelihood < best_log_likelihood)
- best_log_likelihood = final_log_likelihood,
- best_t = t_test, best_r = r_test, best_b = b_test;
- }
- t = best_t, r = best_r, b = best_b;
- LOG(RPiAwb, Debug)
- << "Fine search found t " << t << " r " << r << " b " << b;
-}
-
-void Awb::awbBayes()
-{
- // May as well divide out G to save computeDelta2Sum from doing it over
- // and over.
- for (auto &z : zones_)
- z.R = z.R / (z.G + 1), z.B = z.B / (z.G + 1);
- // Get the current prior, and scale according to how many zones are
- // valid... not entirely sure about this.
- Pwl prior = interpolatePrior();
- prior *= zones_.size() / (double)(AWB_STATS_SIZE_X * AWB_STATS_SIZE_Y);
- prior.Map([](double x, double y) {
- LOG(RPiAwb, Debug) << "(" << x << "," << y << ")";
- });
- double t = coarseSearch(prior);
- double r = config_.ct_r.Eval(t);
- double b = config_.ct_b.Eval(t);
- LOG(RPiAwb, Debug)
- << "After coarse search: r " << r << " b " << b << " (gains r "
- << 1 / r << " b " << 1 / b << ")";
- // Not entirely sure how to handle the fine search yet. Mostly the
- // estimated CT is already good enough, but the fine search allows us to
- // wander transversely off the CT curve. Under some illuminants, where
- // there may be more or less green light, this may prove beneficial,
- // though I probably need more real datasets before deciding exactly how
- // this should be controlled and tuned.
- fineSearch(t, r, b, prior);
- LOG(RPiAwb, Debug)
- << "After fine search: r " << r << " b " << b << " (gains r "
- << 1 / r << " b " << 1 / b << ")";
- // Write results out for the main thread to pick up. Remember to adjust
- // the gains from the ones that the "canonical sensor" would require to
- // the ones needed by *this* sensor.
- async_results_.temperature_K = t;
- async_results_.gain_r = 1.0 / r * config_.sensitivity_r;
- async_results_.gain_g = 1.0;
- async_results_.gain_b = 1.0 / b * config_.sensitivity_b;
-}
-
-void Awb::awbGrey()
-{
- LOG(RPiAwb, Debug) << "Grey world AWB";
- // Make a separate list of the derivatives for each of red and blue, so
- // that we can sort them to exclude the extreme gains. We could
- // consider some variations, such as normalising all the zones first, or
- // doing an L2 average etc.
- std::vector<RGB> &derivs_R(zones_);
- std::vector<RGB> derivs_B(derivs_R);
- std::sort(derivs_R.begin(), derivs_R.end(),
- [](RGB const &a, RGB const &b) {
- return a.G * b.R < b.G * a.R;
- });
- std::sort(derivs_B.begin(), derivs_B.end(),
- [](RGB const &a, RGB const &b) {
- return a.G * b.B < b.G * a.B;
- });
- // Average the middle half of the values.
- int discard = derivs_R.size() / 4;
- RGB sum_R(0, 0, 0), sum_B(0, 0, 0);
- for (auto ri = derivs_R.begin() + discard,
- bi = derivs_B.begin() + discard;
- ri != derivs_R.end() - discard; ri++, bi++)
- sum_R += *ri, sum_B += *bi;
- double gain_r = sum_R.G / (sum_R.R + 1),
- gain_b = sum_B.G / (sum_B.B + 1);
- async_results_.temperature_K = 4500; // don't know what it is
- async_results_.gain_r = gain_r;
- async_results_.gain_g = 1.0;
- async_results_.gain_b = gain_b;
-}
-
-void Awb::doAwb()
-{
- prepareStats();
- LOG(RPiAwb, Debug) << "Valid zones: " << zones_.size();
- if (zones_.size() > config_.min_regions) {
- if (config_.bayes)
- awbBayes();
- else
- awbGrey();
- LOG(RPiAwb, Debug)
- << "CT found is "
- << async_results_.temperature_K
- << " with gains r " << async_results_.gain_r
- << " and b " << async_results_.gain_b;
- }
-}
-
-// Register algorithm with the system.
-static Algorithm *Create(Controller *controller)
-{
- return (Algorithm *)new Awb(controller);
-}
-static RegisterAlgorithm reg(NAME, &Create);
diff --git a/src/ipa/raspberrypi/controller/rpi/awb.hpp b/src/ipa/raspberrypi/controller/rpi/awb.hpp
deleted file mode 100644
index ac3dca6f..00000000
--- a/src/ipa/raspberrypi/controller/rpi/awb.hpp
+++ /dev/null
@@ -1,179 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * awb.hpp - AWB control algorithm
- */
-#pragma once
-
-#include <mutex>
-#include <condition_variable>
-#include <thread>
-
-#include "../awb_algorithm.hpp"
-#include "../pwl.hpp"
-#include "../awb_status.h"
-
-namespace RPiController {
-
-// Control algorithm to perform AWB calculations.
-
-struct AwbMode {
- void Read(boost::property_tree::ptree const &params);
- double ct_lo; // low CT value for search
- double ct_hi; // high CT value for search
-};
-
-struct AwbPrior {
- void Read(boost::property_tree::ptree const &params);
- double lux; // lux level
- Pwl prior; // maps CT to prior log likelihood for this lux level
-};
-
-struct AwbConfig {
- AwbConfig() : default_mode(nullptr) {}
- void Read(boost::property_tree::ptree const &params);
- // Only repeat the AWB calculation every "this many" frames
- uint16_t frame_period;
- // number of initial frames for which speed is taken as 1.0 (maximum)
- uint16_t startup_frames;
- unsigned int convergence_frames; // approx number of frames to converge
- double speed; // IIR filter speed applied to algorithm results
- bool fast; // "fast" mode uses a 16x16 rather than 32x32 grid
- Pwl ct_r; // function maps CT to r (= R/G)
- Pwl ct_b; // function maps CT to b (= B/G)
- // table of illuminant priors at different lux levels
- std::vector<AwbPrior> priors;
- // AWB "modes" (determines the search range)
- std::map<std::string, AwbMode> modes;
- AwbMode *default_mode; // mode used if no mode selected
- // minimum proportion of pixels counted within AWB region for it to be
- // "useful"
- double min_pixels;
- // minimum G value of those pixels, to be regarded as "useful"
- uint16_t min_G;
- // number of AWB regions that must be "useful" in order to do the AWB
- // calculation
- uint32_t min_regions;
- // clamp on colour error term (so as not to penalise non-grey excessively)
- double delta_limit;
- // step size control in coarse search
- double coarse_step;
- // how far to wander off CT curve towards "more purple"
- double transverse_pos;
- // how far to wander off CT curve towards "more green"
- double transverse_neg;
- // red sensitivity ratio (set to canonical sensor's R/G divided by this
- // sensor's R/G)
- double sensitivity_r;
- // blue sensitivity ratio (set to canonical sensor's B/G divided by this
- // sensor's B/G)
- double sensitivity_b;
- // The whitepoint (which we normally "aim" for) can be moved.
- double whitepoint_r;
- double whitepoint_b;
- bool bayes; // use Bayesian algorithm
-};
-
-class Awb : public AwbAlgorithm
-{
-public:
- Awb(Controller *controller = NULL);
- ~Awb();
- char const *Name() const override;
- void Initialise() override;
- void Read(boost::property_tree::ptree const &params) override;
- // AWB handles "pausing" for itself.
- bool IsPaused() const override;
- void Pause() override;
- void Resume() override;
- unsigned int GetConvergenceFrames() const override;
- void SetMode(std::string const &name) override;
- void SetManualGains(double manual_r, double manual_b) override;
- void SwitchMode(CameraMode const &camera_mode, Metadata *metadata) override;
- void Prepare(Metadata *image_metadata) override;
- void Process(StatisticsPtr &stats, Metadata *image_metadata) override;
- struct RGB {
- RGB(double _R = 0, double _G = 0, double _B = 0)
- : R(_R), G(_G), B(_B)
- {
- }
- double R, G, B;
- RGB &operator+=(RGB const &other)
- {
- R += other.R, G += other.G, B += other.B;
- return *this;
- }
- };
-
-private:
- bool isAutoEnabled() const;
- // configuration is read-only, and available to both threads
- AwbConfig config_;
- std::thread async_thread_;
- void asyncFunc(); // asynchronous thread function
- std::mutex mutex_;
- // condvar for async thread to wait on
- std::condition_variable async_signal_;
- // condvar for synchronous thread to wait on
- std::condition_variable sync_signal_;
- // for sync thread to check if async thread finished (requires mutex)
- bool async_finished_;
- // for async thread to check if it's been told to run (requires mutex)
- bool async_start_;
- // for async thread to check if it's been told to quit (requires mutex)
- bool async_abort_;
-
- // The following are only for the synchronous thread to use:
- // for sync thread to note it has asked the async thread to run
- bool async_started_;
- // counts up to frame_period before restarting the async thread
- int frame_phase_;
- int frame_count_; // counts up to startup_frames
- AwbStatus sync_results_;
- AwbStatus prev_sync_results_;
- std::string mode_name_;
- // The following are for the asynchronous thread to use, though the main
- // thread can set/reset them if the async thread is known to be idle:
- void restartAsync(StatisticsPtr &stats, double lux);
- // copy out the results from the async thread so that it can be restarted
- void fetchAsyncResults();
- StatisticsPtr statistics_;
- AwbMode *mode_;
- double lux_;
- AwbStatus async_results_;
- void doAwb();
- void awbBayes();
- void awbGrey();
- void prepareStats();
- double computeDelta2Sum(double gain_r, double gain_b);
- Pwl interpolatePrior();
- double coarseSearch(Pwl const &prior);
- void fineSearch(double &t, double &r, double &b, Pwl const &prior);
- std::vector<RGB> zones_;
- std::vector<Pwl::Point> points_;
- // manual r setting
- double manual_r_;
- // manual b setting
- double manual_b_;
- bool first_switch_mode_; // is this the first call to SwitchMode?
-};
-
-static inline Awb::RGB operator+(Awb::RGB const &a, Awb::RGB const &b)
-{
- return Awb::RGB(a.R + b.R, a.G + b.G, a.B + b.B);
-}
-static inline Awb::RGB operator-(Awb::RGB const &a, Awb::RGB const &b)
-{
- return Awb::RGB(a.R - b.R, a.G - b.G, a.B - b.B);
-}
-static inline Awb::RGB operator*(double d, Awb::RGB const &rgb)
-{
- return Awb::RGB(d * rgb.R, d * rgb.G, d * rgb.B);
-}
-static inline Awb::RGB operator*(Awb::RGB const &rgb, double d)
-{
- return d * rgb;
-}
-
-} // namespace RPiController
diff --git a/src/ipa/raspberrypi/controller/rpi/black_level.cpp b/src/ipa/raspberrypi/controller/rpi/black_level.cpp
deleted file mode 100644
index 6b3497f1..00000000
--- a/src/ipa/raspberrypi/controller/rpi/black_level.cpp
+++ /dev/null
@@ -1,63 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * black_level.cpp - black level control algorithm
- */
-
-#include <math.h>
-#include <stdint.h>
-
-#include <libcamera/base/log.h>
-
-#include "../black_level_status.h"
-
-#include "black_level.hpp"
-
-using namespace RPiController;
-using namespace libcamera;
-
-LOG_DEFINE_CATEGORY(RPiBlackLevel)
-
-#define NAME "rpi.black_level"
-
-BlackLevel::BlackLevel(Controller *controller)
- : Algorithm(controller)
-{
-}
-
-char const *BlackLevel::Name() const
-{
- return NAME;
-}
-
-void BlackLevel::Read(boost::property_tree::ptree const &params)
-{
- uint16_t black_level = params.get<uint16_t>(
- "black_level", 4096); // 64 in 10 bits scaled to 16 bits
- black_level_r_ = params.get<uint16_t>("black_level_r", black_level);
- black_level_g_ = params.get<uint16_t>("black_level_g", black_level);
- black_level_b_ = params.get<uint16_t>("black_level_b", black_level);
- LOG(RPiBlackLevel, Debug)
- << " Read black levels red " << black_level_r_
- << " green " << black_level_g_
- << " blue " << black_level_b_;
-}
-
-void BlackLevel::Prepare(Metadata *image_metadata)
-{
- // Possibly we should think about doing this in a switch_mode or
- // something?
- struct BlackLevelStatus status;
- status.black_level_r = black_level_r_;
- status.black_level_g = black_level_g_;
- status.black_level_b = black_level_b_;
- image_metadata->Set("black_level.status", status);
-}
-
-// Register algorithm with the system.
-static Algorithm *Create(Controller *controller)
-{
- return new BlackLevel(controller);
-}
-static RegisterAlgorithm reg(NAME, &Create);
diff --git a/src/ipa/raspberrypi/controller/rpi/black_level.hpp b/src/ipa/raspberrypi/controller/rpi/black_level.hpp
deleted file mode 100644
index 65ec4d0e..00000000
--- a/src/ipa/raspberrypi/controller/rpi/black_level.hpp
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * black_level.hpp - black level control algorithm
- */
-#pragma once
-
-#include "../algorithm.hpp"
-#include "../black_level_status.h"
-
-// This is our implementation of the "black level algorithm".
-
-namespace RPiController {
-
-class BlackLevel : public Algorithm
-{
-public:
- BlackLevel(Controller *controller);
- char const *Name() const override;
- void Read(boost::property_tree::ptree const &params) override;
- void Prepare(Metadata *image_metadata) override;
-
-private:
- double black_level_r_;
- double black_level_g_;
- double black_level_b_;
-};
-
-} // namespace RPiController
diff --git a/src/ipa/raspberrypi/controller/rpi/ccm.cpp b/src/ipa/raspberrypi/controller/rpi/ccm.cpp
deleted file mode 100644
index 821a4c7c..00000000
--- a/src/ipa/raspberrypi/controller/rpi/ccm.cpp
+++ /dev/null
@@ -1,169 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * ccm.cpp - CCM (colour correction matrix) control algorithm
- */
-
-#include <libcamera/base/log.h>
-
-#include "../awb_status.h"
-#include "../ccm_status.h"
-#include "../lux_status.h"
-#include "../metadata.hpp"
-
-#include "ccm.hpp"
-
-using namespace RPiController;
-using namespace libcamera;
-
-LOG_DEFINE_CATEGORY(RPiCcm)
-
-// This algorithm selects a CCM (Colour Correction Matrix) according to the
-// colour temperature estimated by AWB (interpolating between known matrices as
-// necessary). Additionally the amount of colour saturation can be controlled
-// both according to the current estimated lux level and according to a
-// saturation setting that is exposed to applications.
-
-#define NAME "rpi.ccm"
-
-Matrix::Matrix()
-{
- memset(m, 0, sizeof(m));
-}
-Matrix::Matrix(double m0, double m1, double m2, double m3, double m4, double m5,
- double m6, double m7, double m8)
-{
- m[0][0] = m0, m[0][1] = m1, m[0][2] = m2, m[1][0] = m3, m[1][1] = m4,
- m[1][2] = m5, m[2][0] = m6, m[2][1] = m7, m[2][2] = m8;
-}
-void Matrix::Read(boost::property_tree::ptree const &params)
-{
- double *ptr = (double *)m;
- int n = 0;
- for (auto it = params.begin(); it != params.end(); it++) {
- if (n++ == 9)
- throw std::runtime_error("Ccm: too many values in CCM");
- *ptr++ = it->second.get_value<double>();
- }
- if (n < 9)
- throw std::runtime_error("Ccm: too few values in CCM");
-}
-
-Ccm::Ccm(Controller *controller)
- : CcmAlgorithm(controller), saturation_(1.0) {}
-
-char const *Ccm::Name() const
-{
- return NAME;
-}
-
-void Ccm::Read(boost::property_tree::ptree const &params)
-{
- if (params.get_child_optional("saturation"))
- config_.saturation.Read(params.get_child("saturation"));
- for (auto &p : params.get_child("ccms")) {
- CtCcm ct_ccm;
- ct_ccm.ct = p.second.get<double>("ct");
- ct_ccm.ccm.Read(p.second.get_child("ccm"));
- if (!config_.ccms.empty() &&
- ct_ccm.ct <= config_.ccms.back().ct)
- throw std::runtime_error(
- "Ccm: CCM not in increasing colour temperature order");
- config_.ccms.push_back(std::move(ct_ccm));
- }
- if (config_.ccms.empty())
- throw std::runtime_error("Ccm: no CCMs specified");
-}
-
-void Ccm::SetSaturation(double saturation)
-{
- saturation_ = saturation;
-}
-
-void Ccm::Initialise() {}
-
-template<typename T>
-static bool get_locked(Metadata *metadata, std::string const &tag, T &value)
-{
- T *ptr = metadata->GetLocked<T>(tag);
- if (ptr == nullptr)
- return false;
- value = *ptr;
- return true;
-}
-
-Matrix calculate_ccm(std::vector<CtCcm> const &ccms, double ct)
-{
- if (ct <= ccms.front().ct)
- return ccms.front().ccm;
- else if (ct >= ccms.back().ct)
- return ccms.back().ccm;
- else {
- int i = 0;
- for (; ct > ccms[i].ct; i++)
- ;
- double lambda =
- (ct - ccms[i - 1].ct) / (ccms[i].ct - ccms[i - 1].ct);
- return lambda * ccms[i].ccm + (1.0 - lambda) * ccms[i - 1].ccm;
- }
-}
-
-Matrix apply_saturation(Matrix const &ccm, double saturation)
-{
- Matrix RGB2Y(0.299, 0.587, 0.114, -0.169, -0.331, 0.500, 0.500, -0.419,
- -0.081);
- Matrix Y2RGB(1.000, 0.000, 1.402, 1.000, -0.345, -0.714, 1.000, 1.771,
- 0.000);
- Matrix S(1, 0, 0, 0, saturation, 0, 0, 0, saturation);
- return Y2RGB * S * RGB2Y * ccm;
-}
-
-void Ccm::Prepare(Metadata *image_metadata)
-{
- bool awb_ok = false, lux_ok = false;
- struct AwbStatus awb = {};
- awb.temperature_K = 4000; // in case no metadata
- struct LuxStatus lux = {};
- lux.lux = 400; // in case no metadata
- {
- // grab mutex just once to get everything
- std::lock_guard<Metadata> lock(*image_metadata);
- awb_ok = get_locked(image_metadata, "awb.status", awb);
- lux_ok = get_locked(image_metadata, "lux.status", lux);
- }
- if (!awb_ok)
- LOG(RPiCcm, Warning) << "no colour temperature found";
- if (!lux_ok)
- LOG(RPiCcm, Warning) << "no lux value found";
- Matrix ccm = calculate_ccm(config_.ccms, awb.temperature_K);
- double saturation = saturation_;
- struct CcmStatus ccm_status;
- ccm_status.saturation = saturation;
- if (!config_.saturation.Empty())
- saturation *= config_.saturation.Eval(
- config_.saturation.Domain().Clip(lux.lux));
- ccm = apply_saturation(ccm, saturation);
- for (int j = 0; j < 3; j++)
- for (int i = 0; i < 3; i++)
- ccm_status.matrix[j * 3 + i] =
- std::max(-8.0, std::min(7.9999, ccm.m[j][i]));
- LOG(RPiCcm, Debug)
- << "colour temperature " << awb.temperature_K << "K";
- LOG(RPiCcm, Debug)
- << "CCM: " << ccm_status.matrix[0] << " " << ccm_status.matrix[1]
- << " " << ccm_status.matrix[2] << " "
- << ccm_status.matrix[3] << " " << ccm_status.matrix[4]
- << " " << ccm_status.matrix[5] << " "
- << ccm_status.matrix[6] << " " << ccm_status.matrix[7]
- << " " << ccm_status.matrix[8];
- image_metadata->Set("ccm.status", ccm_status);
-}
-
-// Register algorithm with the system.
-static Algorithm *Create(Controller *controller)
-{
- return (Algorithm *)new Ccm(controller);
- ;
-}
-static RegisterAlgorithm reg(NAME, &Create);
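
For reference, the calculate_ccm() helper above is a plain linear blend between the two calibrated matrices that bracket the estimated colour temperature, clamping to the first or last matrix outside the calibrated range. The following standalone sketch reproduces just that blend outside the controller framework; the Ccm3x3 alias, blendCcm name and sample values are illustrative assumptions, not part of this patch.

#include <array>
#include <cstdio>

using Ccm3x3 = std::array<double, 9>; /* row-major 3x3 matrix */

/* Blend two CCMs calibrated at ctLow/ctHigh for a colour temperature between them. */
static Ccm3x3 blendCcm(const Ccm3x3 &low, double ctLow,
		       const Ccm3x3 &high, double ctHigh, double ct)
{
	/* Same lambda as calculate_ccm(): 0 at ctLow, 1 at ctHigh. */
	double lambda = (ct - ctLow) / (ctHigh - ctLow);
	Ccm3x3 out;
	for (unsigned int i = 0; i < 9; i++)
		out[i] = lambda * high[i] + (1.0 - lambda) * low[i];
	return out;
}

int main()
{
	/* Hypothetical bracketing matrices; real ones come from the tuning file. */
	Ccm3x3 a = { 1.6, -0.3, -0.3, -0.5, 2.0, -0.5, -0.1, -1.4, 2.5 };
	Ccm3x3 b = { 1.8, -0.7, -0.1, -0.4, 1.8, -0.4, -0.1, -0.6, 1.7 };
	Ccm3x3 m = blendCcm(a, 3000.0, b, 6000.0, 4500.0);
	std::printf("m[0][0] = %f\n", m[0]);
	return 0;
}
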
diff --git a/src/ipa/raspberrypi/controller/rpi/contrast.cpp b/src/ipa/raspberrypi/controller/rpi/contrast.cpp
deleted file mode 100644
index ae55aad5..00000000
--- a/src/ipa/raspberrypi/controller/rpi/contrast.cpp
+++ /dev/null
@@ -1,185 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * contrast.cpp - contrast (gamma) control algorithm
- */
-#include <stdint.h>
-
-#include <libcamera/base/log.h>
-
-#include "../contrast_status.h"
-#include "../histogram.hpp"
-
-#include "contrast.hpp"
-
-using namespace RPiController;
-using namespace libcamera;
-
-LOG_DEFINE_CATEGORY(RPiContrast)
-
-// This is a very simple control algorithm which simply retrieves the results of
-// AGC and AWB via their "status" metadata, and applies digital gain to the
-// colour channels in accordance with those instructions. We take care never to
-// apply less than unity gains, as that would cause fully saturated pixels to go
-// off-white.
-
-#define NAME "rpi.contrast"
-
-Contrast::Contrast(Controller *controller)
- : ContrastAlgorithm(controller), brightness_(0.0), contrast_(1.0)
-{
-}
-
-char const *Contrast::Name() const
-{
- return NAME;
-}
-
-void Contrast::Read(boost::property_tree::ptree const &params)
-{
- // enable adaptive enhancement by default
- config_.ce_enable = params.get<int>("ce_enable", 1);
- // the point near the bottom of the histogram to move
- config_.lo_histogram = params.get<double>("lo_histogram", 0.01);
- // where in the range to try and move it to
- config_.lo_level = params.get<double>("lo_level", 0.015);
- // but don't move by more than this
- config_.lo_max = params.get<double>("lo_max", 500);
- // equivalent values for the top of the histogram...
- config_.hi_histogram = params.get<double>("hi_histogram", 0.95);
- config_.hi_level = params.get<double>("hi_level", 0.95);
- config_.hi_max = params.get<double>("hi_max", 2000);
- config_.gamma_curve.Read(params.get_child("gamma_curve"));
-}
-
-void Contrast::SetBrightness(double brightness)
-{
- brightness_ = brightness;
-}
-
-void Contrast::SetContrast(double contrast)
-{
- contrast_ = contrast;
-}
-
-static void fill_in_status(ContrastStatus &status, double brightness,
- double contrast, Pwl &gamma_curve)
-{
- status.brightness = brightness;
- status.contrast = contrast;
- for (int i = 0; i < CONTRAST_NUM_POINTS - 1; i++) {
- int x = i < 16 ? i * 1024
- : (i < 24 ? (i - 16) * 2048 + 16384
- : (i - 24) * 4096 + 32768);
- status.points[i].x = x;
- status.points[i].y = std::min(65535.0, gamma_curve.Eval(x));
- }
- status.points[CONTRAST_NUM_POINTS - 1].x = 65535;
- status.points[CONTRAST_NUM_POINTS - 1].y = 65535;
-}
-
-void Contrast::Initialise()
-{
- // Fill in some default values as Prepare will run before Process gets
- // called.
- fill_in_status(status_, brightness_, contrast_, config_.gamma_curve);
-}
-
-void Contrast::Prepare(Metadata *image_metadata)
-{
- std::unique_lock<std::mutex> lock(mutex_);
- image_metadata->Set("contrast.status", status_);
-}
-
-Pwl compute_stretch_curve(Histogram const &histogram,
- ContrastConfig const &config)
-{
- Pwl enhance;
- enhance.Append(0, 0);
- // If the start of the histogram is rather empty, try to pull it down a
- // bit.
- double hist_lo = histogram.Quantile(config.lo_histogram) *
- (65536 / NUM_HISTOGRAM_BINS);
- double level_lo = config.lo_level * 65536;
- LOG(RPiContrast, Debug)
- << "Move histogram point " << hist_lo << " to " << level_lo;
- hist_lo = std::max(
- level_lo,
- std::min(65535.0, std::min(hist_lo, level_lo + config.lo_max)));
- LOG(RPiContrast, Debug)
- << "Final values " << hist_lo << " -> " << level_lo;
- enhance.Append(hist_lo, level_lo);
- // Keep the mid-point (median) in the same place, though, to limit the
- // apparent amount of global brightness shift.
- double mid = histogram.Quantile(0.5) * (65536 / NUM_HISTOGRAM_BINS);
- enhance.Append(mid, mid);
-
- // If the top of the histogram is empty, try to pull the pixel values
- // there up.
- double hist_hi = histogram.Quantile(config.hi_histogram) *
- (65536 / NUM_HISTOGRAM_BINS);
- double level_hi = config.hi_level * 65536;
- LOG(RPiContrast, Debug)
- << "Move histogram point " << hist_hi << " to " << level_hi;
- hist_hi = std::min(
- level_hi,
- std::max(0.0, std::max(hist_hi, level_hi - config.hi_max)));
- LOG(RPiContrast, Debug)
- << "Final values " << hist_hi << " -> " << level_hi;
- enhance.Append(hist_hi, level_hi);
- enhance.Append(65535, 65535);
- return enhance;
-}
-
-Pwl apply_manual_contrast(Pwl const &gamma_curve, double brightness,
- double contrast)
-{
- Pwl new_gamma_curve;
- LOG(RPiContrast, Debug)
- << "Manual brightness " << brightness << " contrast " << contrast;
- gamma_curve.Map([&](double x, double y) {
- new_gamma_curve.Append(
- x, std::max(0.0, std::min(65535.0,
- (y - 32768) * contrast +
- 32768 + brightness)));
- });
- return new_gamma_curve;
-}
-
-void Contrast::Process(StatisticsPtr &stats,
- [[maybe_unused]] Metadata *image_metadata)
-{
- Histogram histogram(stats->hist[0].g_hist, NUM_HISTOGRAM_BINS);
- // We look at the histogram and adjust the gamma curve in the following
- // ways: 1. Adjust the gamma curve so as to pull the start of the
- // histogram down, and possibly push the end up.
- Pwl gamma_curve = config_.gamma_curve;
- if (config_.ce_enable) {
- if (config_.lo_max != 0 || config_.hi_max != 0)
- gamma_curve = compute_stretch_curve(histogram, config_)
- .Compose(gamma_curve);
- // We could apply other adjustments (e.g. partial equalisation)
- // based on the histogram...?
- }
- // 2. Finally apply any manually selected brightness/contrast
- // adjustment.
- if (brightness_ != 0 || contrast_ != 1.0)
- gamma_curve = apply_manual_contrast(gamma_curve, brightness_,
- contrast_);
- // And fill in the status for output. Use more points towards the bottom
- // of the curve.
- ContrastStatus status;
- fill_in_status(status, brightness_, contrast_, gamma_curve);
- {
- std::unique_lock<std::mutex> lock(mutex_);
- status_ = status;
- }
-}
-
-// Register algorithm with the system.
-static Algorithm *Create(Controller *controller)
-{
- return (Algorithm *)new Contrast(controller);
-}
-static RegisterAlgorithm reg(NAME, &Create);
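
The non-uniform x spacing in fill_in_status() above (1024 apart up to 16384, then 2048 apart up to 32768, then 4096 apart) is what produces the x values seen in the imx219.json gamma_curve later in this patch. A minimal sketch that just prints that spacing, assuming 33 points in total (the actual CONTRAST_NUM_POINTS value is not visible in this hunk):

#include <cstdio>

int main()
{
	/* First 32 points: dense at the bottom of the curve, sparser higher up. */
	for (int i = 0; i < 32; i++) {
		int x = i < 16 ? i * 1024
			       : (i < 24 ? (i - 16) * 2048 + 16384
					 : (i - 24) * 4096 + 32768);
		std::printf("%d ", x);
	}
	/* The final point is pinned to full scale. */
	std::printf("65535\n");
	return 0;
}
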
diff --git a/src/ipa/raspberrypi/controller/rpi/contrast.hpp b/src/ipa/raspberrypi/controller/rpi/contrast.hpp
deleted file mode 100644
index 85624539..00000000
--- a/src/ipa/raspberrypi/controller/rpi/contrast.hpp
+++ /dev/null
@@ -1,50 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * contrast.hpp - contrast (gamma) control algorithm
- */
-#pragma once
-
-#include <mutex>
-
-#include "../contrast_algorithm.hpp"
-#include "../pwl.hpp"
-
-namespace RPiController {
-
-// Back End algorithm to apply correct digital gain. Should be placed after
-// Back End AWB.
-
-struct ContrastConfig {
- bool ce_enable;
- double lo_histogram;
- double lo_level;
- double lo_max;
- double hi_histogram;
- double hi_level;
- double hi_max;
- Pwl gamma_curve;
-};
-
-class Contrast : public ContrastAlgorithm
-{
-public:
- Contrast(Controller *controller = NULL);
- char const *Name() const override;
- void Read(boost::property_tree::ptree const &params) override;
- void SetBrightness(double brightness) override;
- void SetContrast(double contrast) override;
- void Initialise() override;
- void Prepare(Metadata *image_metadata) override;
- void Process(StatisticsPtr &stats, Metadata *image_metadata) override;
-
-private:
- ContrastConfig config_;
- double brightness_;
- double contrast_;
- ContrastStatus status_;
- std::mutex mutex_;
-};
-
-} // namespace RPiController
diff --git a/src/ipa/raspberrypi/controller/rpi/dpc.cpp b/src/ipa/raspberrypi/controller/rpi/dpc.cpp
deleted file mode 100644
index 110f5056..00000000
--- a/src/ipa/raspberrypi/controller/rpi/dpc.cpp
+++ /dev/null
@@ -1,53 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * dpc.cpp - DPC (defective pixel correction) control algorithm
- */
-
-#include <libcamera/base/log.h>
-
-#include "dpc.hpp"
-
-using namespace RPiController;
-using namespace libcamera;
-
-LOG_DEFINE_CATEGORY(RPiDpc)
-
-// We use the lux status so that we can apply stronger settings in darkness (if
-// necessary).
-
-#define NAME "rpi.dpc"
-
-Dpc::Dpc(Controller *controller)
- : Algorithm(controller)
-{
-}
-
-char const *Dpc::Name() const
-{
- return NAME;
-}
-
-void Dpc::Read(boost::property_tree::ptree const &params)
-{
- config_.strength = params.get<int>("strength", 1);
- if (config_.strength < 0 || config_.strength > 2)
- throw std::runtime_error("Dpc: bad strength value");
-}
-
-void Dpc::Prepare(Metadata *image_metadata)
-{
- DpcStatus dpc_status = {};
- // Should we vary this with lux level or analogue gain? TBD.
- dpc_status.strength = config_.strength;
- LOG(RPiDpc, Debug) << "strength " << dpc_status.strength;
- image_metadata->Set("dpc.status", dpc_status);
-}
-
-// Register algorithm with the system.
-static Algorithm *Create(Controller *controller)
-{
- return (Algorithm *)new Dpc(controller);
-}
-static RegisterAlgorithm reg(NAME, &Create);
diff --git a/src/ipa/raspberrypi/controller/rpi/dpc.hpp b/src/ipa/raspberrypi/controller/rpi/dpc.hpp
deleted file mode 100644
index d90285c4..00000000
--- a/src/ipa/raspberrypi/controller/rpi/dpc.hpp
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * dpc.hpp - DPC (defective pixel correction) control algorithm
- */
-#pragma once
-
-#include "../algorithm.hpp"
-#include "../dpc_status.h"
-
-namespace RPiController {
-
-// Back End algorithm to apply appropriate DPC settings.
-
-struct DpcConfig {
- int strength;
-};
-
-class Dpc : public Algorithm
-{
-public:
- Dpc(Controller *controller);
- char const *Name() const override;
- void Read(boost::property_tree::ptree const &params) override;
- void Prepare(Metadata *image_metadata) override;
-
-private:
- DpcConfig config_;
-};
-
-} // namespace RPiController
diff --git a/src/ipa/raspberrypi/controller/rpi/focus.cpp b/src/ipa/raspberrypi/controller/rpi/focus.cpp
deleted file mode 100644
index a87ec802..00000000
--- a/src/ipa/raspberrypi/controller/rpi/focus.cpp
+++ /dev/null
@@ -1,50 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Limited
- *
- * focus.cpp - focus algorithm
- */
-#include <stdint.h>
-
-#include <libcamera/base/log.h>
-
-#include "../focus_status.h"
-#include "focus.hpp"
-
-using namespace RPiController;
-using namespace libcamera;
-
-LOG_DEFINE_CATEGORY(RPiFocus)
-
-#define NAME "rpi.focus"
-
-Focus::Focus(Controller *controller)
- : Algorithm(controller)
-{
-}
-
-char const *Focus::Name() const
-{
- return NAME;
-}
-
-void Focus::Process(StatisticsPtr &stats, Metadata *image_metadata)
-{
- FocusStatus status;
- unsigned int i;
- for (i = 0; i < FOCUS_REGIONS; i++)
- status.focus_measures[i] = stats->focus_stats[i].contrast_val[1][1] / 1000;
- status.num = i;
- image_metadata->Set("focus.status", status);
-
- LOG(RPiFocus, Debug)
- << "Focus contrast measure: "
- << (status.focus_measures[5] + status.focus_measures[6]) / 10;
-}
-
-/* Register algorithm with the system. */
-static Algorithm *Create(Controller *controller)
-{
- return new Focus(controller);
-}
-static RegisterAlgorithm reg(NAME, &Create);
diff --git a/src/ipa/raspberrypi/controller/rpi/geq.cpp b/src/ipa/raspberrypi/controller/rpi/geq.cpp
deleted file mode 100644
index 4530cb75..00000000
--- a/src/ipa/raspberrypi/controller/rpi/geq.cpp
+++ /dev/null
@@ -1,81 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * geq.cpp - GEQ (green equalisation) control algorithm
- */
-
-#include <libcamera/base/log.h>
-
-#include "../device_status.h"
-#include "../lux_status.h"
-#include "../pwl.hpp"
-
-#include "geq.hpp"
-
-using namespace RPiController;
-using namespace libcamera;
-
-LOG_DEFINE_CATEGORY(RPiGeq)
-
-// We use the lux status so that we can apply stronger settings in darkness (if
-// necessary).
-
-#define NAME "rpi.geq"
-
-Geq::Geq(Controller *controller)
- : Algorithm(controller)
-{
-}
-
-char const *Geq::Name() const
-{
- return NAME;
-}
-
-void Geq::Read(boost::property_tree::ptree const &params)
-{
- config_.offset = params.get<uint16_t>("offset", 0);
- config_.slope = params.get<double>("slope", 0.0);
- if (config_.slope < 0.0 || config_.slope >= 1.0)
- throw std::runtime_error("Geq: bad slope value");
- if (params.get_child_optional("strength"))
- config_.strength.Read(params.get_child("strength"));
-}
-
-void Geq::Prepare(Metadata *image_metadata)
-{
- LuxStatus lux_status = {};
- lux_status.lux = 400;
- if (image_metadata->Get("lux.status", lux_status))
- LOG(RPiGeq, Warning) << "no lux data found";
- DeviceStatus device_status;
- device_status.analogue_gain = 1.0; // in case not found
- if (image_metadata->Get("device.status", device_status))
- LOG(RPiGeq, Warning)
- << "no device metadata - use analogue gain of 1x";
- GeqStatus geq_status = {};
- double strength =
- config_.strength.Empty()
- ? 1.0
- : config_.strength.Eval(config_.strength.Domain().Clip(
- lux_status.lux));
- strength *= device_status.analogue_gain;
- double offset = config_.offset * strength;
- double slope = config_.slope * strength;
- geq_status.offset = std::min(65535.0, std::max(0.0, offset));
- geq_status.slope = std::min(.99999, std::max(0.0, slope));
- LOG(RPiGeq, Debug)
- << "offset " << geq_status.offset << " slope "
- << geq_status.slope << " (analogue gain "
- << device_status.analogue_gain << " lux "
- << lux_status.lux << ")";
- image_metadata->Set("geq.status", geq_status);
-}
-
-// Register algorithm with the system.
-static Algorithm *Create(Controller *controller)
-{
- return (Algorithm *)new Geq(controller);
-}
-static RegisterAlgorithm reg(NAME, &Create);
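
Geq::Prepare() above scales the tuned offset and slope by an overall strength (the optional lux-dependent curve, defaulting to 1.0, multiplied by the analogue gain) and then clamps both values. A standalone sketch of that arithmetic, using the imx219.json "rpi.geq" values from this patch and a made-up gain:

#include <algorithm>
#include <cstdio>

int main()
{
	double offset = 204.0, slope = 0.01633; /* imx219.json "rpi.geq" values */
	double strength = 1.0;                  /* no "strength" curve configured */
	double analogueGain = 8.0;              /* hypothetical sensor gain */

	strength *= analogueGain;
	double geqOffset = std::min(65535.0, std::max(0.0, offset * strength));
	double geqSlope = std::min(0.99999, std::max(0.0, slope * strength));
	std::printf("offset %f slope %f\n", geqOffset, geqSlope);
	return 0;
}
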
diff --git a/src/ipa/raspberrypi/controller/rpi/geq.hpp b/src/ipa/raspberrypi/controller/rpi/geq.hpp
deleted file mode 100644
index 8ba3046b..00000000
--- a/src/ipa/raspberrypi/controller/rpi/geq.hpp
+++ /dev/null
@@ -1,34 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * geq.hpp - GEQ (green equalisation) control algorithm
- */
-#pragma once
-
-#include "../algorithm.hpp"
-#include "../geq_status.h"
-
-namespace RPiController {
-
-// Back End algorithm to apply appropriate GEQ settings.
-
-struct GeqConfig {
- uint16_t offset;
- double slope;
- Pwl strength; // lux to strength factor
-};
-
-class Geq : public Algorithm
-{
-public:
- Geq(Controller *controller);
- char const *Name() const override;
- void Read(boost::property_tree::ptree const &params) override;
- void Prepare(Metadata *image_metadata) override;
-
-private:
- GeqConfig config_;
-};
-
-} // namespace RPiController
diff --git a/src/ipa/raspberrypi/controller/rpi/lux.cpp b/src/ipa/raspberrypi/controller/rpi/lux.cpp
deleted file mode 100644
index 4d145b6f..00000000
--- a/src/ipa/raspberrypi/controller/rpi/lux.cpp
+++ /dev/null
@@ -1,104 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * lux.cpp - Lux control algorithm
- */
-#include <math.h>
-
-#include <linux/bcm2835-isp.h>
-
-#include <libcamera/base/log.h>
-
-#include "../device_status.h"
-
-#include "lux.hpp"
-
-using namespace RPiController;
-using namespace libcamera;
-using namespace std::literals::chrono_literals;
-
-LOG_DEFINE_CATEGORY(RPiLux)
-
-#define NAME "rpi.lux"
-
-Lux::Lux(Controller *controller)
- : Algorithm(controller)
-{
- // Put in some defaults as there will be no meaningful values until
- // Process has run.
- status_.aperture = 1.0;
- status_.lux = 400;
-}
-
-char const *Lux::Name() const
-{
- return NAME;
-}
-
-void Lux::Read(boost::property_tree::ptree const &params)
-{
- reference_shutter_speed_ =
- params.get<double>("reference_shutter_speed") * 1.0us;
- reference_gain_ = params.get<double>("reference_gain");
- reference_aperture_ = params.get<double>("reference_aperture", 1.0);
- reference_Y_ = params.get<double>("reference_Y");
- reference_lux_ = params.get<double>("reference_lux");
- current_aperture_ = reference_aperture_;
-}
-
-void Lux::SetCurrentAperture(double aperture)
-{
- current_aperture_ = aperture;
-}
-
-void Lux::Prepare(Metadata *image_metadata)
-{
- std::unique_lock<std::mutex> lock(mutex_);
- image_metadata->Set("lux.status", status_);
-}
-
-void Lux::Process(StatisticsPtr &stats, Metadata *image_metadata)
-{
- DeviceStatus device_status;
- if (image_metadata->Get("device.status", device_status) == 0) {
- double current_gain = device_status.analogue_gain;
- double current_aperture = device_status.aperture.value_or(current_aperture_);
- uint64_t sum = 0;
- uint32_t num = 0;
- uint32_t *bin = stats->hist[0].g_hist;
- const int num_bins = sizeof(stats->hist[0].g_hist) /
- sizeof(stats->hist[0].g_hist[0]);
- for (int i = 0; i < num_bins; i++)
- sum += bin[i] * (uint64_t)i, num += bin[i];
- // add .5 to reflect the mid-points of bins
- double current_Y = sum / (double)num + .5;
- double gain_ratio = reference_gain_ / current_gain;
- double shutter_speed_ratio =
- reference_shutter_speed_ / device_status.shutter_speed;
- double aperture_ratio = reference_aperture_ / current_aperture;
- double Y_ratio = current_Y * (65536 / num_bins) / reference_Y_;
- double estimated_lux = shutter_speed_ratio * gain_ratio *
- aperture_ratio * aperture_ratio *
- Y_ratio * reference_lux_;
- LuxStatus status;
- status.lux = estimated_lux;
- status.aperture = current_aperture;
- LOG(RPiLux, Debug) << ": estimated lux " << estimated_lux;
- {
- std::unique_lock<std::mutex> lock(mutex_);
- status_ = status;
- }
- // Overwrite the metadata here as well, so that downstream
- // algorithms get the latest value.
- image_metadata->Set("lux.status", status);
- } else
- LOG(RPiLux, Warning) << ": no device metadata";
-}
-
-// Register algorithm with the system.
-static Algorithm *Create(Controller *controller)
-{
- return (Algorithm *)new Lux(controller);
-}
-static RegisterAlgorithm reg(NAME, &Create);
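
The lux estimate in Process() above is a reciprocity calculation against the reference image described in lux.hpp: the reference lux is scaled by the shutter, gain and (squared) aperture ratios and by how bright the current frame is relative to the reference Y. A standalone sketch with the imx219.json "rpi.lux" reference values from this patch and hypothetical current-frame numbers:

#include <cstdio>

int main()
{
	/* Reference conditions, as in imx219.json "rpi.lux". */
	double refShutter = 27685.0; /* microseconds */
	double refGain = 1.0, refAperture = 1.0;
	double refY = 12744.0, refLux = 998.0;

	/* Hypothetical current frame. */
	double shutter = 10000.0, gain = 2.0, aperture = 1.0;
	double currentY = 6000.0; /* mean G level, already scaled to 16 bits */

	double apertureRatio = refAperture / aperture;
	double lux = (refShutter / shutter) * (refGain / gain) *
		     apertureRatio * apertureRatio *
		     (currentY / refY) * refLux;
	std::printf("estimated lux %f\n", lux);
	return 0;
}
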
diff --git a/src/ipa/raspberrypi/controller/rpi/lux.hpp b/src/ipa/raspberrypi/controller/rpi/lux.hpp
deleted file mode 100644
index 3ebd35d1..00000000
--- a/src/ipa/raspberrypi/controller/rpi/lux.hpp
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * lux.hpp - Lux control algorithm
- */
-#pragma once
-
-#include <mutex>
-
-#include <libcamera/base/utils.h>
-
-#include "../lux_status.h"
-#include "../algorithm.hpp"
-
-// This is our implementation of the "lux control algorithm".
-
-namespace RPiController {
-
-class Lux : public Algorithm
-{
-public:
- Lux(Controller *controller);
- char const *Name() const override;
- void Read(boost::property_tree::ptree const &params) override;
- void Prepare(Metadata *image_metadata) override;
- void Process(StatisticsPtr &stats, Metadata *image_metadata) override;
- void SetCurrentAperture(double aperture);
-
-private:
- // These values define the conditions of the reference image, against
- // which we compare the new image.
- libcamera::utils::Duration reference_shutter_speed_;
- double reference_gain_;
- double reference_aperture_; // units of 1/f
- double reference_Y_; // out of 65536
- double reference_lux_;
- double current_aperture_;
- LuxStatus status_;
- std::mutex mutex_;
-};
-
-} // namespace RPiController
diff --git a/src/ipa/raspberrypi/controller/rpi/noise.cpp b/src/ipa/raspberrypi/controller/rpi/noise.cpp
deleted file mode 100644
index 63cad639..00000000
--- a/src/ipa/raspberrypi/controller/rpi/noise.cpp
+++ /dev/null
@@ -1,76 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * noise.cpp - Noise control algorithm
- */
-
-#include <math.h>
-
-#include <libcamera/base/log.h>
-
-#include "../device_status.h"
-#include "../noise_status.h"
-
-#include "noise.hpp"
-
-using namespace RPiController;
-using namespace libcamera;
-
-LOG_DEFINE_CATEGORY(RPiNoise)
-
-#define NAME "rpi.noise"
-
-Noise::Noise(Controller *controller)
- : Algorithm(controller), mode_factor_(1.0)
-{
-}
-
-char const *Noise::Name() const
-{
- return NAME;
-}
-
-void Noise::SwitchMode(CameraMode const &camera_mode,
- [[maybe_unused]] Metadata *metadata)
-{
- // For example, we would expect a 2x2 binned mode to have a "noise
- // factor" of sqrt(2x2) = 2. (can't be less than one, right?)
- mode_factor_ = std::max(1.0, camera_mode.noise_factor);
-}
-
-void Noise::Read(boost::property_tree::ptree const &params)
-{
- reference_constant_ = params.get<double>("reference_constant");
- reference_slope_ = params.get<double>("reference_slope");
-}
-
-void Noise::Prepare(Metadata *image_metadata)
-{
- struct DeviceStatus device_status;
- device_status.analogue_gain = 1.0; // keep compiler calm
- if (image_metadata->Get("device.status", device_status) == 0) {
- // There is a slight question as to exactly how the noise
- // profile, specifically the constant part of it, scales. For
- // now we assume it all scales the same, and we'll revisit this
- // if it proves substantially wrong. NOTE: we may also want to
- // make some adjustments based on the camera mode (such as
- // binning), if we knew how to discover it...
- double factor = sqrt(device_status.analogue_gain) / mode_factor_;
- struct NoiseStatus status;
- status.noise_constant = reference_constant_ * factor;
- status.noise_slope = reference_slope_ * factor;
- image_metadata->Set("noise.status", status);
- LOG(RPiNoise, Debug)
- << "constant " << status.noise_constant
- << " slope " << status.noise_slope;
- } else
- LOG(RPiNoise, Warning) << " no metadata";
-}
-
-// Register algorithm with the system.
-static Algorithm *Create(Controller *controller)
-{
- return new Noise(controller);
-}
-static RegisterAlgorithm reg(NAME, &Create);
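
Noise::Prepare() above scales the whole reference profile by sqrt(analogue gain) and divides by the mode's noise factor, so binned modes report proportionally less noise. A standalone sketch of that scaling, using the imx219.json "rpi.noise" values from this patch together with hypothetical gain and mode-factor values:

#include <cmath>
#include <cstdio>

int main()
{
	double refConstant = 0.0, refSlope = 3.67; /* imx219.json "rpi.noise" */
	double analogueGain = 4.0;                 /* hypothetical */
	double modeFactor = 2.0;                   /* e.g. a 2x2 binned mode */

	double factor = std::sqrt(analogueGain) / modeFactor;
	std::printf("constant %f slope %f\n",
		    refConstant * factor, refSlope * factor);
	return 0;
}
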
diff --git a/src/ipa/raspberrypi/controller/rpi/noise.hpp b/src/ipa/raspberrypi/controller/rpi/noise.hpp
deleted file mode 100644
index 1c9de5c8..00000000
--- a/src/ipa/raspberrypi/controller/rpi/noise.hpp
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * noise.hpp - Noise control algorithm
- */
-#pragma once
-
-#include "../algorithm.hpp"
-#include "../noise_status.h"
-
-// This is our implementation of the "noise algorithm".
-
-namespace RPiController {
-
-class Noise : public Algorithm
-{
-public:
- Noise(Controller *controller);
- char const *Name() const override;
- void SwitchMode(CameraMode const &camera_mode, Metadata *metadata) override;
- void Read(boost::property_tree::ptree const &params) override;
- void Prepare(Metadata *image_metadata) override;
-
-private:
- // the noise profile for analogue gain of 1.0
- double reference_constant_;
- double reference_slope_;
- double mode_factor_;
-};
-
-} // namespace RPiController
diff --git a/src/ipa/raspberrypi/controller/rpi/sdn.cpp b/src/ipa/raspberrypi/controller/rpi/sdn.cpp
deleted file mode 100644
index 93845509..00000000
--- a/src/ipa/raspberrypi/controller/rpi/sdn.cpp
+++ /dev/null
@@ -1,75 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019-2021, Raspberry Pi (Trading) Limited
- *
- * sdn.cpp - SDN (spatial denoise) control algorithm
- */
-
-#include <libcamera/base/log.h>
-
-#include "../denoise_status.h"
-#include "../noise_status.h"
-
-#include "sdn.hpp"
-
-using namespace RPiController;
-using namespace libcamera;
-
-LOG_DEFINE_CATEGORY(RPiSdn)
-
-// Calculate settings for the spatial denoise block using the noise profile in
-// the image metadata.
-
-#define NAME "rpi.sdn"
-
-Sdn::Sdn(Controller *controller)
- : DenoiseAlgorithm(controller), mode_(DenoiseMode::ColourOff)
-{
-}
-
-char const *Sdn::Name() const
-{
- return NAME;
-}
-
-void Sdn::Read(boost::property_tree::ptree const &params)
-{
- deviation_ = params.get<double>("deviation", 3.2);
- strength_ = params.get<double>("strength", 0.75);
-}
-
-void Sdn::Initialise() {}
-
-void Sdn::Prepare(Metadata *image_metadata)
-{
- struct NoiseStatus noise_status = {};
- noise_status.noise_slope = 3.0; // in case no metadata
- if (image_metadata->Get("noise.status", noise_status) != 0)
- LOG(RPiSdn, Warning) << "no noise profile found";
- LOG(RPiSdn, Debug)
- << "Noise profile: constant " << noise_status.noise_constant
- << " slope " << noise_status.noise_slope;
- struct DenoiseStatus status;
- status.noise_constant = noise_status.noise_constant * deviation_;
- status.noise_slope = noise_status.noise_slope * deviation_;
- status.strength = strength_;
- status.mode = static_cast<std::underlying_type_t<DenoiseMode>>(mode_);
- image_metadata->Set("denoise.status", status);
- LOG(RPiSdn, Debug)
- << "programmed constant " << status.noise_constant
- << " slope " << status.noise_slope
- << " strength " << status.strength;
-}
-
-void Sdn::SetMode(DenoiseMode mode)
-{
- // We only distinguish between off and all other modes.
- mode_ = mode;
-}
-
-// Register algorithm with the system.
-static Algorithm *Create(Controller *controller)
-{
- return (Algorithm *)new Sdn(controller);
-}
-static RegisterAlgorithm reg(NAME, &Create);
diff --git a/src/ipa/raspberrypi/controller/rpi/sdn.hpp b/src/ipa/raspberrypi/controller/rpi/sdn.hpp
deleted file mode 100644
index 2371ce04..00000000
--- a/src/ipa/raspberrypi/controller/rpi/sdn.hpp
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * sdn.hpp - SDN (spatial denoise) control algorithm
- */
-#pragma once
-
-#include "../algorithm.hpp"
-#include "../denoise_algorithm.hpp"
-
-namespace RPiController {
-
-// Algorithm to calculate correct spatial denoise (SDN) settings.
-
-class Sdn : public DenoiseAlgorithm
-{
-public:
- Sdn(Controller *controller = NULL);
- char const *Name() const override;
- void Read(boost::property_tree::ptree const &params) override;
- void Initialise() override;
- void Prepare(Metadata *image_metadata) override;
- void SetMode(DenoiseMode mode) override;
-
-private:
- double deviation_;
- double strength_;
- DenoiseMode mode_;
-};
-
-} // namespace RPiController
diff --git a/src/ipa/raspberrypi/controller/rpi/sharpen.cpp b/src/ipa/raspberrypi/controller/rpi/sharpen.cpp
deleted file mode 100644
index 18825a43..00000000
--- a/src/ipa/raspberrypi/controller/rpi/sharpen.cpp
+++ /dev/null
@@ -1,85 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * sharpen.cpp - sharpening control algorithm
- */
-
-#include <math.h>
-
-#include <libcamera/base/log.h>
-
-#include "../sharpen_status.h"
-
-#include "sharpen.hpp"
-
-using namespace RPiController;
-using namespace libcamera;
-
-LOG_DEFINE_CATEGORY(RPiSharpen)
-
-#define NAME "rpi.sharpen"
-
-Sharpen::Sharpen(Controller *controller)
- : SharpenAlgorithm(controller), user_strength_(1.0)
-{
-}
-
-char const *Sharpen::Name() const
-{
- return NAME;
-}
-
-void Sharpen::SwitchMode(CameraMode const &camera_mode,
- [[maybe_unused]] Metadata *metadata)
-{
- // can't be less than one, right?
- mode_factor_ = std::max(1.0, camera_mode.noise_factor);
-}
-
-void Sharpen::Read(boost::property_tree::ptree const &params)
-{
- threshold_ = params.get<double>("threshold", 1.0);
- strength_ = params.get<double>("strength", 1.0);
- limit_ = params.get<double>("limit", 1.0);
- LOG(RPiSharpen, Debug)
- << "Read threshold " << threshold_
- << " strength " << strength_
- << " limit " << limit_;
-}
-
-void Sharpen::SetStrength(double strength)
-{
- // Note that this function is how an application sets the overall
- // sharpening "strength". We call this the "user strength" field
- // as there already is a strength_ field - being an internal gain
- // parameter that gets passed to the ISP control code. Negative
- // values are not allowed - coerce them to zero (no sharpening).
- user_strength_ = std::max(0.0, strength);
-}
-
-void Sharpen::Prepare(Metadata *image_metadata)
-{
- // The user_strength_ affects the algorithm's internal gain directly, but
- // we adjust the limit and threshold less aggressively. Using a sqrt
- // function is an arbitrary but gentle way of accomplishing this.
- double user_strength_sqrt = sqrt(user_strength_);
- struct SharpenStatus status;
- // Binned modes seem to need the sharpening toned down with this
- // pipeline, thus we use the mode_factor here. Also avoid
- // divide-by-zero with the user_strength_sqrt.
- status.threshold = threshold_ * mode_factor_ /
- std::max(0.01, user_strength_sqrt);
- status.strength = strength_ / mode_factor_ * user_strength_;
- status.limit = limit_ / mode_factor_ * user_strength_sqrt;
- // Finally, report any application-supplied parameters that were used.
- status.user_strength = user_strength_;
- image_metadata->Set("sharpen.status", status);
-}
-
-// Register algorithm with the system.
-static Algorithm *Create(Controller *controller)
-{
- return new Sharpen(controller);
-}
-static RegisterAlgorithm reg(NAME, &Create);
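
Sharpen::Prepare() above maps the tuned threshold/strength/limit through the mode factor and the application-supplied user strength, applying sqrt(user strength) to the threshold and limit so they react less aggressively than the gain itself. A standalone sketch of that mapping with the Read() defaults and a hypothetical user strength:

#include <algorithm>
#include <cmath>
#include <cstdio>

int main()
{
	double threshold = 1.0, strength = 1.0, limit = 1.0; /* Read() defaults */
	double modeFactor = 1.0;   /* full-resolution mode */
	double userStrength = 2.0; /* hypothetical application request */

	double userStrengthSqrt = std::sqrt(userStrength);
	std::printf("threshold %f strength %f limit %f\n",
		    threshold * modeFactor / std::max(0.01, userStrengthSqrt),
		    strength / modeFactor * userStrength,
		    limit / modeFactor * userStrengthSqrt);
	return 0;
}
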
diff --git a/src/ipa/raspberrypi/controller/rpi/sharpen.hpp b/src/ipa/raspberrypi/controller/rpi/sharpen.hpp
deleted file mode 100644
index 13a076a8..00000000
--- a/src/ipa/raspberrypi/controller/rpi/sharpen.hpp
+++ /dev/null
@@ -1,34 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * sharpen.hpp - sharpening control algorithm
- */
-#pragma once
-
-#include "../sharpen_algorithm.hpp"
-#include "../sharpen_status.h"
-
-// This is our implementation of the "sharpen algorithm".
-
-namespace RPiController {
-
-class Sharpen : public SharpenAlgorithm
-{
-public:
- Sharpen(Controller *controller);
- char const *Name() const override;
- void SwitchMode(CameraMode const &camera_mode, Metadata *metadata) override;
- void Read(boost::property_tree::ptree const &params) override;
- void SetStrength(double strength) override;
- void Prepare(Metadata *image_metadata) override;
-
-private:
- double threshold_;
- double strength_;
- double limit_;
- double mode_factor_;
- double user_strength_;
-};
-
-} // namespace RPiController
diff --git a/src/ipa/raspberrypi/controller/sharpen_algorithm.hpp b/src/ipa/raspberrypi/controller/sharpen_algorithm.hpp
deleted file mode 100644
index ca800308..00000000
--- a/src/ipa/raspberrypi/controller/sharpen_algorithm.hpp
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Limited
- *
- * sharpen_algorithm.hpp - sharpness control algorithm interface
- */
-#pragma once
-
-#include "algorithm.hpp"
-
-namespace RPiController {
-
-class SharpenAlgorithm : public Algorithm
-{
-public:
- SharpenAlgorithm(Controller *controller) : Algorithm(controller) {}
- // A sharpness control algorithm must provide the following:
- virtual void SetStrength(double strength) = 0;
-};
-
-} // namespace RPiController
diff --git a/src/ipa/raspberrypi/controller/sharpen_status.h b/src/ipa/raspberrypi/controller/sharpen_status.h
deleted file mode 100644
index 7501b191..00000000
--- a/src/ipa/raspberrypi/controller/sharpen_status.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
- *
- * sharpen_status.h - Sharpen control algorithm status
- */
-#pragma once
-
-// The "sharpen" algorithm stores the strength to use.
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-struct SharpenStatus {
- // controls the smallest level of detail (or noise!) that sharpening will pick up
- double threshold;
- // the rate at which the sharpening response ramps once above the threshold
- double strength;
- // upper limit of the allowed sharpening response
- double limit;
- // The sharpening strength requested by the user or application.
- double user_strength;
-};
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/src/ipa/raspberrypi/data/imx219.json b/src/ipa/raspberrypi/data/imx219.json
deleted file mode 100644
index de59d936..00000000
--- a/src/ipa/raspberrypi/data/imx219.json
+++ /dev/null
@@ -1,412 +0,0 @@
-{
- "rpi.black_level":
- {
- "black_level": 4096
- },
- "rpi.dpc":
- {
-
- },
- "rpi.lux":
- {
- "reference_shutter_speed": 27685,
- "reference_gain": 1.0,
- "reference_aperture": 1.0,
- "reference_lux": 998,
- "reference_Y": 12744
- },
- "rpi.noise":
- {
- "reference_constant": 0,
- "reference_slope": 3.67
- },
- "rpi.geq":
- {
- "offset": 204,
- "slope": 0.01633
- },
- "rpi.sdn":
- {
-
- },
- "rpi.awb":
- {
- "priors":
- [
- {
- "lux": 0, "prior":
- [
- 2000, 1.0, 3000, 0.0, 13000, 0.0
- ]
- },
- {
- "lux": 800, "prior":
- [
- 2000, 0.0, 6000, 2.0, 13000, 2.0
- ]
- },
- {
- "lux": 1500, "prior":
- [
- 2000, 0.0, 4000, 1.0, 6000, 6.0, 6500, 7.0, 7000, 1.0, 13000, 1.0
- ]
- }
- ],
- "modes":
- {
- "auto":
- {
- "lo": 2500,
- "hi": 8000
- },
- "incandescent":
- {
- "lo": 2500,
- "hi": 3000
- },
- "tungsten":
- {
- "lo": 3000,
- "hi": 3500
- },
- "fluorescent":
- {
- "lo": 4000,
- "hi": 4700
- },
- "indoor":
- {
- "lo": 3000,
- "hi": 5000
- },
- "daylight":
- {
- "lo": 5500,
- "hi": 6500
- },
- "cloudy":
- {
- "lo": 7000,
- "hi": 8600
- }
- },
- "bayes": 1,
- "ct_curve":
- [
- 2498.0, 0.9309, 0.3599, 2911.0, 0.8682, 0.4283, 2919.0, 0.8358, 0.4621, 3627.0, 0.7646, 0.5327, 4600.0, 0.6079, 0.6721, 5716.0,
- 0.5712, 0.7017, 8575.0, 0.4331, 0.8037
- ],
- "sensitivity_r": 1.05,
- "sensitivity_b": 1.05,
- "transverse_pos": 0.04791,
- "transverse_neg": 0.04881
- },
- "rpi.agc":
- {
- "metering_modes":
- {
- "centre-weighted":
- {
- "weights":
- [
- 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
- ]
- },
- "spot":
- {
- "weights":
- [
- 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
- ]
- },
- "matrix":
- {
- "weights":
- [
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
- ]
- }
- },
- "exposure_modes":
- {
- "normal":
- {
- "shutter":
- [
- 100, 10000, 30000, 60000, 66666
- ],
- "gain":
- [
- 1.0, 2.0, 4.0, 6.0, 8.0
- ]
- },
- "short":
- {
- "shutter":
- [
- 100, 5000, 10000, 20000, 33333
- ],
- "gain":
- [
- 1.0, 2.0, 4.0, 6.0, 8.0
- ]
- },
- "long":
- {
- "shutter":
- [
- 100, 10000, 30000, 60000, 120000
- ],
- "gain":
- [
- 1.0, 2.0, 4.0, 6.0, 12.0
- ]
- }
- },
- "constraint_modes":
- {
- "normal":
- [
- {
- "bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target":
- [
- 0, 0.5, 1000, 0.5
- ]
- }
- ],
- "highlight":
- [
- {
- "bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target":
- [
- 0, 0.5, 1000, 0.5
- ]
- },
- {
- "bound": "UPPER", "q_lo": 0.98, "q_hi": 1.0, "y_target":
- [
- 0, 0.8, 1000, 0.8
- ]
- }
- ],
- "shadows":
- [
- {
- "bound": "LOWER", "q_lo": 0.0, "q_hi": 0.5, "y_target":
- [
- 0, 0.17, 1000, 0.17
- ]
- }
- ]
- },
- "y_target":
- [
- 0, 0.16, 1000, 0.165, 10000, 0.17
- ]
- },
- "rpi.alsc":
- {
- "omega": 1.3,
- "n_iter": 100,
- "luminance_strength": 0.7,
- "calibrations_Cr":
- [
- {
- "ct": 3000, "table":
- [
- 1.487, 1.481, 1.481, 1.445, 1.389, 1.327, 1.307, 1.307, 1.307, 1.309, 1.341, 1.405, 1.458, 1.494, 1.494, 1.497,
- 1.491, 1.481, 1.448, 1.397, 1.331, 1.275, 1.243, 1.229, 1.229, 1.249, 1.287, 1.349, 1.409, 1.463, 1.494, 1.497,
- 1.491, 1.469, 1.405, 1.331, 1.275, 1.217, 1.183, 1.172, 1.172, 1.191, 1.231, 1.287, 1.349, 1.424, 1.484, 1.499,
- 1.487, 1.444, 1.363, 1.283, 1.217, 1.183, 1.148, 1.138, 1.138, 1.159, 1.191, 1.231, 1.302, 1.385, 1.461, 1.492,
- 1.481, 1.423, 1.334, 1.253, 1.189, 1.148, 1.135, 1.119, 1.123, 1.137, 1.159, 1.203, 1.272, 1.358, 1.442, 1.488,
- 1.479, 1.413, 1.321, 1.236, 1.176, 1.139, 1.118, 1.114, 1.116, 1.123, 1.149, 1.192, 1.258, 1.344, 1.432, 1.487,
- 1.479, 1.413, 1.321, 1.236, 1.176, 1.139, 1.116, 1.114, 1.115, 1.123, 1.149, 1.192, 1.258, 1.344, 1.432, 1.487,
- 1.479, 1.425, 1.336, 1.251, 1.189, 1.149, 1.136, 1.118, 1.121, 1.138, 1.158, 1.206, 1.275, 1.358, 1.443, 1.488,
- 1.488, 1.448, 1.368, 1.285, 1.219, 1.189, 1.149, 1.139, 1.139, 1.158, 1.195, 1.235, 1.307, 1.387, 1.462, 1.493,
- 1.496, 1.475, 1.411, 1.337, 1.284, 1.219, 1.189, 1.176, 1.176, 1.195, 1.235, 1.296, 1.356, 1.429, 1.487, 1.501,
- 1.495, 1.489, 1.458, 1.407, 1.337, 1.287, 1.253, 1.239, 1.239, 1.259, 1.296, 1.356, 1.419, 1.472, 1.499, 1.499,
- 1.494, 1.489, 1.489, 1.453, 1.398, 1.336, 1.317, 1.317, 1.317, 1.321, 1.351, 1.416, 1.467, 1.501, 1.501, 1.499
- ]
- },
- {
- "ct": 3850, "table":
- [
- 1.694, 1.688, 1.688, 1.649, 1.588, 1.518, 1.495, 1.495, 1.495, 1.497, 1.532, 1.602, 1.659, 1.698, 1.698, 1.703,
- 1.698, 1.688, 1.653, 1.597, 1.525, 1.464, 1.429, 1.413, 1.413, 1.437, 1.476, 1.542, 1.606, 1.665, 1.698, 1.703,
- 1.697, 1.673, 1.605, 1.525, 1.464, 1.401, 1.369, 1.354, 1.354, 1.377, 1.417, 1.476, 1.542, 1.623, 1.687, 1.705,
- 1.692, 1.646, 1.561, 1.472, 1.401, 1.368, 1.337, 1.323, 1.324, 1.348, 1.377, 1.417, 1.492, 1.583, 1.661, 1.697,
- 1.686, 1.625, 1.528, 1.439, 1.372, 1.337, 1.321, 1.311, 1.316, 1.324, 1.348, 1.389, 1.461, 1.553, 1.642, 1.694,
- 1.684, 1.613, 1.514, 1.423, 1.359, 1.328, 1.311, 1.306, 1.306, 1.316, 1.339, 1.378, 1.446, 1.541, 1.633, 1.693,
- 1.684, 1.613, 1.514, 1.423, 1.359, 1.328, 1.311, 1.305, 1.305, 1.316, 1.339, 1.378, 1.446, 1.541, 1.633, 1.693,
- 1.685, 1.624, 1.529, 1.438, 1.372, 1.336, 1.324, 1.309, 1.314, 1.323, 1.348, 1.392, 1.462, 1.555, 1.646, 1.694,
- 1.692, 1.648, 1.561, 1.473, 1.403, 1.372, 1.336, 1.324, 1.324, 1.348, 1.378, 1.423, 1.495, 1.585, 1.667, 1.701,
- 1.701, 1.677, 1.608, 1.527, 1.471, 1.403, 1.375, 1.359, 1.359, 1.378, 1.423, 1.488, 1.549, 1.631, 1.694, 1.709,
- 1.702, 1.694, 1.656, 1.601, 1.527, 1.473, 1.441, 1.424, 1.424, 1.443, 1.488, 1.549, 1.621, 1.678, 1.706, 1.707,
- 1.699, 1.694, 1.694, 1.654, 1.593, 1.525, 1.508, 1.508, 1.508, 1.509, 1.546, 1.614, 1.674, 1.708, 1.708, 1.707
- ]
- },
- {
- "ct": 6000, "table":
- [
- 2.179, 2.176, 2.176, 2.125, 2.048, 1.975, 1.955, 1.954, 1.954, 1.956, 1.993, 2.071, 2.141, 2.184, 2.185, 2.188,
- 2.189, 2.176, 2.128, 2.063, 1.973, 1.908, 1.872, 1.856, 1.856, 1.876, 1.922, 1.999, 2.081, 2.144, 2.184, 2.192,
- 2.187, 2.152, 2.068, 1.973, 1.907, 1.831, 1.797, 1.786, 1.786, 1.804, 1.853, 1.922, 1.999, 2.089, 2.166, 2.191,
- 2.173, 2.117, 2.013, 1.908, 1.831, 1.791, 1.755, 1.749, 1.749, 1.767, 1.804, 1.853, 1.939, 2.041, 2.135, 2.181,
- 2.166, 2.089, 1.975, 1.869, 1.792, 1.755, 1.741, 1.731, 1.734, 1.749, 1.767, 1.818, 1.903, 2.005, 2.111, 2.173,
- 2.165, 2.074, 1.956, 1.849, 1.777, 1.742, 1.729, 1.725, 1.729, 1.734, 1.758, 1.804, 1.884, 1.991, 2.099, 2.172,
- 2.165, 2.074, 1.956, 1.849, 1.777, 1.742, 1.727, 1.724, 1.725, 1.734, 1.758, 1.804, 1.884, 1.991, 2.099, 2.172,
- 2.166, 2.085, 1.975, 1.869, 1.791, 1.755, 1.741, 1.729, 1.733, 1.749, 1.769, 1.819, 1.904, 2.009, 2.114, 2.174,
- 2.174, 2.118, 2.015, 1.913, 1.831, 1.791, 1.755, 1.749, 1.749, 1.769, 1.811, 1.855, 1.943, 2.047, 2.139, 2.183,
- 2.187, 2.151, 2.072, 1.979, 1.911, 1.831, 1.801, 1.791, 1.791, 1.811, 1.855, 1.933, 2.006, 2.101, 2.173, 2.197,
- 2.189, 2.178, 2.132, 2.069, 1.979, 1.913, 1.879, 1.867, 1.867, 1.891, 1.933, 2.006, 2.091, 2.156, 2.195, 2.197,
- 2.181, 2.179, 2.178, 2.131, 2.057, 1.981, 1.965, 1.965, 1.965, 1.969, 1.999, 2.083, 2.153, 2.197, 2.197, 2.196
- ]
- }
- ],
- "calibrations_Cb":
- [
- {
- "ct": 3000, "table":
- [
- 1.967, 1.961, 1.955, 1.953, 1.954, 1.957, 1.961, 1.963, 1.963, 1.961, 1.959, 1.957, 1.954, 1.951, 1.951, 1.955,
- 1.961, 1.959, 1.957, 1.956, 1.962, 1.967, 1.975, 1.979, 1.979, 1.975, 1.971, 1.967, 1.957, 1.952, 1.951, 1.951,
- 1.959, 1.959, 1.959, 1.966, 1.976, 1.989, 1.999, 2.004, 2.003, 1.997, 1.991, 1.981, 1.967, 1.956, 1.951, 1.951,
- 1.959, 1.962, 1.967, 1.978, 1.993, 2.009, 2.021, 2.028, 2.026, 2.021, 2.011, 1.995, 1.981, 1.964, 1.953, 1.951,
- 1.961, 1.965, 1.977, 1.993, 2.009, 2.023, 2.041, 2.047, 2.047, 2.037, 2.024, 2.011, 1.995, 1.975, 1.958, 1.953,
- 1.963, 1.968, 1.981, 2.001, 2.019, 2.039, 2.046, 2.052, 2.052, 2.051, 2.035, 2.021, 2.001, 1.978, 1.959, 1.955,
- 1.961, 1.966, 1.981, 2.001, 2.019, 2.038, 2.043, 2.051, 2.052, 2.042, 2.034, 2.019, 2.001, 1.978, 1.959, 1.954,
- 1.957, 1.961, 1.972, 1.989, 2.003, 2.021, 2.038, 2.039, 2.039, 2.034, 2.019, 2.004, 1.988, 1.971, 1.954, 1.949,
- 1.952, 1.953, 1.959, 1.972, 1.989, 2.003, 2.016, 2.019, 2.019, 2.014, 2.003, 1.988, 1.971, 1.955, 1.948, 1.947,
- 1.949, 1.948, 1.949, 1.957, 1.971, 1.978, 1.991, 1.994, 1.994, 1.989, 1.979, 1.967, 1.954, 1.946, 1.947, 1.947,
- 1.949, 1.946, 1.944, 1.946, 1.949, 1.954, 1.962, 1.967, 1.967, 1.963, 1.956, 1.948, 1.943, 1.943, 1.946, 1.949,
- 1.951, 1.946, 1.944, 1.942, 1.943, 1.943, 1.947, 1.948, 1.949, 1.947, 1.945, 1.941, 1.938, 1.939, 1.948, 1.952
- ]
- },
- {
- "ct": 3850, "table":
- [
- 1.726, 1.724, 1.722, 1.723, 1.731, 1.735, 1.743, 1.746, 1.746, 1.741, 1.735, 1.729, 1.725, 1.721, 1.721, 1.721,
- 1.724, 1.723, 1.723, 1.727, 1.735, 1.744, 1.749, 1.756, 1.756, 1.749, 1.744, 1.735, 1.727, 1.719, 1.719, 1.719,
- 1.723, 1.723, 1.724, 1.735, 1.746, 1.759, 1.767, 1.775, 1.775, 1.766, 1.758, 1.746, 1.735, 1.723, 1.718, 1.716,
- 1.723, 1.725, 1.732, 1.746, 1.759, 1.775, 1.782, 1.792, 1.792, 1.782, 1.772, 1.759, 1.745, 1.729, 1.718, 1.716,
- 1.725, 1.729, 1.738, 1.756, 1.775, 1.785, 1.796, 1.803, 1.804, 1.794, 1.783, 1.772, 1.757, 1.736, 1.722, 1.718,
- 1.728, 1.731, 1.741, 1.759, 1.781, 1.795, 1.803, 1.806, 1.808, 1.805, 1.791, 1.779, 1.762, 1.739, 1.722, 1.721,
- 1.727, 1.731, 1.741, 1.759, 1.781, 1.791, 1.799, 1.804, 1.806, 1.801, 1.791, 1.779, 1.762, 1.739, 1.722, 1.717,
- 1.722, 1.724, 1.733, 1.751, 1.768, 1.781, 1.791, 1.796, 1.799, 1.791, 1.781, 1.766, 1.754, 1.731, 1.717, 1.714,
- 1.718, 1.718, 1.724, 1.737, 1.752, 1.768, 1.776, 1.782, 1.784, 1.781, 1.766, 1.754, 1.737, 1.724, 1.713, 1.709,
- 1.716, 1.715, 1.716, 1.725, 1.737, 1.749, 1.756, 1.763, 1.764, 1.762, 1.749, 1.737, 1.724, 1.717, 1.709, 1.708,
- 1.715, 1.714, 1.712, 1.715, 1.722, 1.729, 1.736, 1.741, 1.742, 1.739, 1.731, 1.723, 1.717, 1.712, 1.711, 1.709,
- 1.716, 1.714, 1.711, 1.712, 1.715, 1.719, 1.723, 1.728, 1.731, 1.729, 1.723, 1.718, 1.711, 1.711, 1.713, 1.713
- ]
- },
- {
- "ct": 6000, "table":
- [
- 1.374, 1.372, 1.373, 1.374, 1.375, 1.378, 1.378, 1.381, 1.382, 1.382, 1.378, 1.373, 1.372, 1.369, 1.365, 1.365,
- 1.371, 1.371, 1.372, 1.374, 1.378, 1.381, 1.384, 1.386, 1.388, 1.387, 1.384, 1.377, 1.372, 1.368, 1.364, 1.362,
- 1.369, 1.371, 1.372, 1.377, 1.383, 1.391, 1.394, 1.396, 1.397, 1.395, 1.391, 1.382, 1.374, 1.369, 1.362, 1.361,
- 1.369, 1.371, 1.375, 1.383, 1.391, 1.399, 1.402, 1.404, 1.405, 1.403, 1.398, 1.391, 1.379, 1.371, 1.363, 1.361,
- 1.371, 1.373, 1.378, 1.388, 1.399, 1.407, 1.411, 1.413, 1.413, 1.411, 1.405, 1.397, 1.385, 1.374, 1.366, 1.362,
- 1.371, 1.374, 1.379, 1.389, 1.405, 1.411, 1.414, 1.414, 1.415, 1.415, 1.411, 1.401, 1.388, 1.376, 1.367, 1.363,
- 1.371, 1.373, 1.379, 1.389, 1.405, 1.408, 1.413, 1.414, 1.414, 1.413, 1.409, 1.401, 1.388, 1.376, 1.367, 1.362,
- 1.366, 1.369, 1.374, 1.384, 1.396, 1.404, 1.407, 1.408, 1.408, 1.408, 1.401, 1.395, 1.382, 1.371, 1.363, 1.359,
- 1.364, 1.365, 1.368, 1.375, 1.386, 1.396, 1.399, 1.401, 1.399, 1.399, 1.395, 1.385, 1.374, 1.365, 1.359, 1.357,
- 1.361, 1.363, 1.365, 1.368, 1.377, 1.384, 1.388, 1.391, 1.391, 1.388, 1.385, 1.375, 1.366, 1.361, 1.358, 1.356,
- 1.361, 1.362, 1.362, 1.364, 1.367, 1.373, 1.376, 1.377, 1.377, 1.375, 1.373, 1.366, 1.362, 1.358, 1.358, 1.358,
- 1.361, 1.362, 1.362, 1.362, 1.363, 1.367, 1.369, 1.368, 1.367, 1.367, 1.367, 1.364, 1.358, 1.357, 1.358, 1.359
- ]
- }
- ],
- "luminance_lut":
- [
- 2.716, 2.568, 2.299, 2.065, 1.845, 1.693, 1.605, 1.597, 1.596, 1.634, 1.738, 1.914, 2.145, 2.394, 2.719, 2.901,
- 2.593, 2.357, 2.093, 1.876, 1.672, 1.528, 1.438, 1.393, 1.394, 1.459, 1.569, 1.731, 1.948, 2.169, 2.481, 2.756,
- 2.439, 2.197, 1.922, 1.691, 1.521, 1.365, 1.266, 1.222, 1.224, 1.286, 1.395, 1.573, 1.747, 1.988, 2.299, 2.563,
- 2.363, 2.081, 1.797, 1.563, 1.376, 1.244, 1.152, 1.099, 1.101, 1.158, 1.276, 1.421, 1.607, 1.851, 2.163, 2.455,
- 2.342, 2.003, 1.715, 1.477, 1.282, 1.152, 1.074, 1.033, 1.035, 1.083, 1.163, 1.319, 1.516, 1.759, 2.064, 2.398,
- 2.342, 1.985, 1.691, 1.446, 1.249, 1.111, 1.034, 1.004, 1.004, 1.028, 1.114, 1.274, 1.472, 1.716, 2.019, 2.389,
- 2.342, 1.991, 1.691, 1.446, 1.249, 1.112, 1.034, 1.011, 1.005, 1.035, 1.114, 1.274, 1.472, 1.716, 2.019, 2.389,
- 2.365, 2.052, 1.751, 1.499, 1.299, 1.171, 1.089, 1.039, 1.042, 1.084, 1.162, 1.312, 1.516, 1.761, 2.059, 2.393,
- 2.434, 2.159, 1.856, 1.601, 1.403, 1.278, 1.166, 1.114, 1.114, 1.162, 1.266, 1.402, 1.608, 1.847, 2.146, 2.435,
- 2.554, 2.306, 2.002, 1.748, 1.563, 1.396, 1.299, 1.247, 1.243, 1.279, 1.386, 1.551, 1.746, 1.977, 2.272, 2.518,
- 2.756, 2.493, 2.195, 1.947, 1.739, 1.574, 1.481, 1.429, 1.421, 1.457, 1.559, 1.704, 1.929, 2.159, 2.442, 2.681,
- 2.935, 2.739, 2.411, 2.151, 1.922, 1.749, 1.663, 1.628, 1.625, 1.635, 1.716, 1.872, 2.113, 2.368, 2.663, 2.824
- ],
- "sigma": 0.00381,
- "sigma_Cb": 0.00216
- },
- "rpi.contrast":
- {
- "ce_enable": 1,
- "gamma_curve":
- [
- 0, 0, 1024, 5040, 2048, 9338, 3072, 12356, 4096, 15312, 5120, 18051, 6144, 20790, 7168, 23193,
- 8192, 25744, 9216, 27942, 10240, 30035, 11264, 32005, 12288, 33975, 13312, 35815, 14336, 37600, 15360, 39168,
- 16384, 40642, 18432, 43379, 20480, 45749, 22528, 47753, 24576, 49621, 26624, 51253, 28672, 52698, 30720, 53796,
- 32768, 54876, 36864, 57012, 40960, 58656, 45056, 59954, 49152, 61183, 53248, 62355, 57344, 63419, 61440, 64476,
- 65535, 65535
- ]
- },
- "rpi.ccm":
- {
- "ccms":
- [
- {
- "ct": 2498, "ccm":
- [
- 1.58731, -0.18011, -0.40721, -0.60639, 2.03422, -0.42782, -0.19612, -1.69203, 2.88815
- ]
- },
- {
- "ct": 2811, "ccm":
- [
- 1.61593, -0.33164, -0.28429, -0.55048, 1.97779, -0.42731, -0.12042, -1.42847, 2.54889
- ]
- },
- {
- "ct": 2911, "ccm":
- [
- 1.62771, -0.41282, -0.21489, -0.57991, 2.04176, -0.46186, -0.07613, -1.13359, 2.20972
- ]
- },
- {
- "ct": 2919, "ccm":
- [
- 1.62661, -0.37736, -0.24925, -0.52519, 1.95233, -0.42714, -0.10842, -1.34929, 2.45771
- ]
- },
- {
- "ct": 3627, "ccm":
- [
- 1.70385, -0.57231, -0.13154, -0.47763, 1.85998, -0.38235, -0.07467, -0.82678, 1.90145
- ]
- },
- {
- "ct": 4600, "ccm":
- [
- 1.68486, -0.61085, -0.07402, -0.41927, 2.04016, -0.62089, -0.08633, -0.67672, 1.76305
- ]
- },
- {
- "ct": 5716, "ccm":
- [
- 1.80439, -0.73699, -0.06739, -0.36073, 1.83327, -0.47255, -0.08378, -0.56403, 1.64781
- ]
- },
- {
- "ct": 8575, "ccm":
- [
- 1.89357, -0.76427, -0.12931, -0.27399, 2.15605, -0.88206, -0.12035, -0.68256, 1.80292
- ]
- }
- ]
- },
- "rpi.sharpen":
- {
-
- },
- "rpi.dpc":
- {
-
- }
-}
diff --git a/src/ipa/raspberrypi/data/imx219_noir.json b/src/ipa/raspberrypi/data/imx219_noir.json
deleted file mode 100644
index 9a3f03ec..00000000
--- a/src/ipa/raspberrypi/data/imx219_noir.json
+++ /dev/null
@@ -1,344 +0,0 @@
-{
- "rpi.black_level":
- {
- "black_level": 4096
- },
- "rpi.dpc":
- {
-
- },
- "rpi.lux":
- {
- "reference_shutter_speed": 27685,
- "reference_gain": 1.0,
- "reference_aperture": 1.0,
- "reference_lux": 998,
- "reference_Y": 12744
- },
- "rpi.noise":
- {
- "reference_constant": 0,
- "reference_slope": 3.67
- },
- "rpi.geq":
- {
- "offset": 204,
- "slope": 0.01633
- },
- "rpi.sdn":
- {
-
- },
- "rpi.awb":
- {
- "bayes": 0
- },
- "rpi.agc":
- {
- "metering_modes":
- {
- "centre-weighted":
- {
- "weights":
- [
- 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
- ]
- },
- "spot":
- {
- "weights":
- [
- 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
- ]
- },
- "matrix":
- {
- "weights":
- [
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
- ]
- }
- },
- "exposure_modes":
- {
- "normal":
- {
- "shutter":
- [
- 100, 10000, 30000, 60000, 66666
- ],
- "gain":
- [
- 1.0, 2.0, 4.0, 6.0, 8.0
- ]
- },
- "short":
- {
- "shutter":
- [
- 100, 5000, 10000, 20000, 33333
- ],
- "gain":
- [
- 1.0, 2.0, 4.0, 6.0, 8.0
- ]
- },
- "long":
- {
- "shutter":
- [
- 100, 10000, 30000, 60000, 120000
- ],
- "gain":
- [
- 1.0, 2.0, 4.0, 6.0, 12.0
- ]
- }
- },
- "constraint_modes":
- {
- "normal":
- [
- {
- "bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target":
- [
- 0, 0.5, 1000, 0.5
- ]
- }
- ],
- "highlight":
- [
- {
- "bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target":
- [
- 0, 0.5, 1000, 0.5
- ]
- },
- {
- "bound": "UPPER", "q_lo": 0.98, "q_hi": 1.0, "y_target":
- [
- 0, 0.8, 1000, 0.8
- ]
- }
- ],
- "shadows":
- [
- {
- "bound": "LOWER", "q_lo": 0.0, "q_hi": 0.5, "y_target":
- [
- 0, 0.17, 1000, 0.17
- ]
- }
- ]
- },
- "y_target":
- [
- 0, 0.16, 1000, 0.165, 10000, 0.17
- ]
- },
- "rpi.alsc":
- {
- "omega": 1.3,
- "n_iter": 100,
- "luminance_strength": 0.7,
- "calibrations_Cr":
- [
- {
- "ct": 3000, "table":
- [
- 1.487, 1.481, 1.481, 1.445, 1.389, 1.327, 1.307, 1.307, 1.307, 1.309, 1.341, 1.405, 1.458, 1.494, 1.494, 1.497,
- 1.491, 1.481, 1.448, 1.397, 1.331, 1.275, 1.243, 1.229, 1.229, 1.249, 1.287, 1.349, 1.409, 1.463, 1.494, 1.497,
- 1.491, 1.469, 1.405, 1.331, 1.275, 1.217, 1.183, 1.172, 1.172, 1.191, 1.231, 1.287, 1.349, 1.424, 1.484, 1.499,
- 1.487, 1.444, 1.363, 1.283, 1.217, 1.183, 1.148, 1.138, 1.138, 1.159, 1.191, 1.231, 1.302, 1.385, 1.461, 1.492,
- 1.481, 1.423, 1.334, 1.253, 1.189, 1.148, 1.135, 1.119, 1.123, 1.137, 1.159, 1.203, 1.272, 1.358, 1.442, 1.488,
- 1.479, 1.413, 1.321, 1.236, 1.176, 1.139, 1.118, 1.114, 1.116, 1.123, 1.149, 1.192, 1.258, 1.344, 1.432, 1.487,
- 1.479, 1.413, 1.321, 1.236, 1.176, 1.139, 1.116, 1.114, 1.115, 1.123, 1.149, 1.192, 1.258, 1.344, 1.432, 1.487,
- 1.479, 1.425, 1.336, 1.251, 1.189, 1.149, 1.136, 1.118, 1.121, 1.138, 1.158, 1.206, 1.275, 1.358, 1.443, 1.488,
- 1.488, 1.448, 1.368, 1.285, 1.219, 1.189, 1.149, 1.139, 1.139, 1.158, 1.195, 1.235, 1.307, 1.387, 1.462, 1.493,
- 1.496, 1.475, 1.411, 1.337, 1.284, 1.219, 1.189, 1.176, 1.176, 1.195, 1.235, 1.296, 1.356, 1.429, 1.487, 1.501,
- 1.495, 1.489, 1.458, 1.407, 1.337, 1.287, 1.253, 1.239, 1.239, 1.259, 1.296, 1.356, 1.419, 1.472, 1.499, 1.499,
- 1.494, 1.489, 1.489, 1.453, 1.398, 1.336, 1.317, 1.317, 1.317, 1.321, 1.351, 1.416, 1.467, 1.501, 1.501, 1.499
- ]
- },
- {
- "ct": 3850, "table":
- [
- 1.694, 1.688, 1.688, 1.649, 1.588, 1.518, 1.495, 1.495, 1.495, 1.497, 1.532, 1.602, 1.659, 1.698, 1.698, 1.703,
- 1.698, 1.688, 1.653, 1.597, 1.525, 1.464, 1.429, 1.413, 1.413, 1.437, 1.476, 1.542, 1.606, 1.665, 1.698, 1.703,
- 1.697, 1.673, 1.605, 1.525, 1.464, 1.401, 1.369, 1.354, 1.354, 1.377, 1.417, 1.476, 1.542, 1.623, 1.687, 1.705,
- 1.692, 1.646, 1.561, 1.472, 1.401, 1.368, 1.337, 1.323, 1.324, 1.348, 1.377, 1.417, 1.492, 1.583, 1.661, 1.697,
- 1.686, 1.625, 1.528, 1.439, 1.372, 1.337, 1.321, 1.311, 1.316, 1.324, 1.348, 1.389, 1.461, 1.553, 1.642, 1.694,
- 1.684, 1.613, 1.514, 1.423, 1.359, 1.328, 1.311, 1.306, 1.306, 1.316, 1.339, 1.378, 1.446, 1.541, 1.633, 1.693,
- 1.684, 1.613, 1.514, 1.423, 1.359, 1.328, 1.311, 1.305, 1.305, 1.316, 1.339, 1.378, 1.446, 1.541, 1.633, 1.693,
- 1.685, 1.624, 1.529, 1.438, 1.372, 1.336, 1.324, 1.309, 1.314, 1.323, 1.348, 1.392, 1.462, 1.555, 1.646, 1.694,
- 1.692, 1.648, 1.561, 1.473, 1.403, 1.372, 1.336, 1.324, 1.324, 1.348, 1.378, 1.423, 1.495, 1.585, 1.667, 1.701,
- 1.701, 1.677, 1.608, 1.527, 1.471, 1.403, 1.375, 1.359, 1.359, 1.378, 1.423, 1.488, 1.549, 1.631, 1.694, 1.709,
- 1.702, 1.694, 1.656, 1.601, 1.527, 1.473, 1.441, 1.424, 1.424, 1.443, 1.488, 1.549, 1.621, 1.678, 1.706, 1.707,
- 1.699, 1.694, 1.694, 1.654, 1.593, 1.525, 1.508, 1.508, 1.508, 1.509, 1.546, 1.614, 1.674, 1.708, 1.708, 1.707
- ]
- },
- {
- "ct": 6000, "table":
- [
- 2.179, 2.176, 2.176, 2.125, 2.048, 1.975, 1.955, 1.954, 1.954, 1.956, 1.993, 2.071, 2.141, 2.184, 2.185, 2.188,
- 2.189, 2.176, 2.128, 2.063, 1.973, 1.908, 1.872, 1.856, 1.856, 1.876, 1.922, 1.999, 2.081, 2.144, 2.184, 2.192,
- 2.187, 2.152, 2.068, 1.973, 1.907, 1.831, 1.797, 1.786, 1.786, 1.804, 1.853, 1.922, 1.999, 2.089, 2.166, 2.191,
- 2.173, 2.117, 2.013, 1.908, 1.831, 1.791, 1.755, 1.749, 1.749, 1.767, 1.804, 1.853, 1.939, 2.041, 2.135, 2.181,
- 2.166, 2.089, 1.975, 1.869, 1.792, 1.755, 1.741, 1.731, 1.734, 1.749, 1.767, 1.818, 1.903, 2.005, 2.111, 2.173,
- 2.165, 2.074, 1.956, 1.849, 1.777, 1.742, 1.729, 1.725, 1.729, 1.734, 1.758, 1.804, 1.884, 1.991, 2.099, 2.172,
- 2.165, 2.074, 1.956, 1.849, 1.777, 1.742, 1.727, 1.724, 1.725, 1.734, 1.758, 1.804, 1.884, 1.991, 2.099, 2.172,
- 2.166, 2.085, 1.975, 1.869, 1.791, 1.755, 1.741, 1.729, 1.733, 1.749, 1.769, 1.819, 1.904, 2.009, 2.114, 2.174,
- 2.174, 2.118, 2.015, 1.913, 1.831, 1.791, 1.755, 1.749, 1.749, 1.769, 1.811, 1.855, 1.943, 2.047, 2.139, 2.183,
- 2.187, 2.151, 2.072, 1.979, 1.911, 1.831, 1.801, 1.791, 1.791, 1.811, 1.855, 1.933, 2.006, 2.101, 2.173, 2.197,
- 2.189, 2.178, 2.132, 2.069, 1.979, 1.913, 1.879, 1.867, 1.867, 1.891, 1.933, 2.006, 2.091, 2.156, 2.195, 2.197,
- 2.181, 2.179, 2.178, 2.131, 2.057, 1.981, 1.965, 1.965, 1.965, 1.969, 1.999, 2.083, 2.153, 2.197, 2.197, 2.196
- ]
- }
- ],
- "calibrations_Cb":
- [
- {
- "ct": 3000, "table":
- [
- 1.967, 1.961, 1.955, 1.953, 1.954, 1.957, 1.961, 1.963, 1.963, 1.961, 1.959, 1.957, 1.954, 1.951, 1.951, 1.955,
- 1.961, 1.959, 1.957, 1.956, 1.962, 1.967, 1.975, 1.979, 1.979, 1.975, 1.971, 1.967, 1.957, 1.952, 1.951, 1.951,
- 1.959, 1.959, 1.959, 1.966, 1.976, 1.989, 1.999, 2.004, 2.003, 1.997, 1.991, 1.981, 1.967, 1.956, 1.951, 1.951,
- 1.959, 1.962, 1.967, 1.978, 1.993, 2.009, 2.021, 2.028, 2.026, 2.021, 2.011, 1.995, 1.981, 1.964, 1.953, 1.951,
- 1.961, 1.965, 1.977, 1.993, 2.009, 2.023, 2.041, 2.047, 2.047, 2.037, 2.024, 2.011, 1.995, 1.975, 1.958, 1.953,
- 1.963, 1.968, 1.981, 2.001, 2.019, 2.039, 2.046, 2.052, 2.052, 2.051, 2.035, 2.021, 2.001, 1.978, 1.959, 1.955,
- 1.961, 1.966, 1.981, 2.001, 2.019, 2.038, 2.043, 2.051, 2.052, 2.042, 2.034, 2.019, 2.001, 1.978, 1.959, 1.954,
- 1.957, 1.961, 1.972, 1.989, 2.003, 2.021, 2.038, 2.039, 2.039, 2.034, 2.019, 2.004, 1.988, 1.971, 1.954, 1.949,
- 1.952, 1.953, 1.959, 1.972, 1.989, 2.003, 2.016, 2.019, 2.019, 2.014, 2.003, 1.988, 1.971, 1.955, 1.948, 1.947,
- 1.949, 1.948, 1.949, 1.957, 1.971, 1.978, 1.991, 1.994, 1.994, 1.989, 1.979, 1.967, 1.954, 1.946, 1.947, 1.947,
- 1.949, 1.946, 1.944, 1.946, 1.949, 1.954, 1.962, 1.967, 1.967, 1.963, 1.956, 1.948, 1.943, 1.943, 1.946, 1.949,
- 1.951, 1.946, 1.944, 1.942, 1.943, 1.943, 1.947, 1.948, 1.949, 1.947, 1.945, 1.941, 1.938, 1.939, 1.948, 1.952
- ]
- },
- {
- "ct": 3850, "table":
- [
- 1.726, 1.724, 1.722, 1.723, 1.731, 1.735, 1.743, 1.746, 1.746, 1.741, 1.735, 1.729, 1.725, 1.721, 1.721, 1.721,
- 1.724, 1.723, 1.723, 1.727, 1.735, 1.744, 1.749, 1.756, 1.756, 1.749, 1.744, 1.735, 1.727, 1.719, 1.719, 1.719,
- 1.723, 1.723, 1.724, 1.735, 1.746, 1.759, 1.767, 1.775, 1.775, 1.766, 1.758, 1.746, 1.735, 1.723, 1.718, 1.716,
- 1.723, 1.725, 1.732, 1.746, 1.759, 1.775, 1.782, 1.792, 1.792, 1.782, 1.772, 1.759, 1.745, 1.729, 1.718, 1.716,
- 1.725, 1.729, 1.738, 1.756, 1.775, 1.785, 1.796, 1.803, 1.804, 1.794, 1.783, 1.772, 1.757, 1.736, 1.722, 1.718,
- 1.728, 1.731, 1.741, 1.759, 1.781, 1.795, 1.803, 1.806, 1.808, 1.805, 1.791, 1.779, 1.762, 1.739, 1.722, 1.721,
- 1.727, 1.731, 1.741, 1.759, 1.781, 1.791, 1.799, 1.804, 1.806, 1.801, 1.791, 1.779, 1.762, 1.739, 1.722, 1.717,
- 1.722, 1.724, 1.733, 1.751, 1.768, 1.781, 1.791, 1.796, 1.799, 1.791, 1.781, 1.766, 1.754, 1.731, 1.717, 1.714,
- 1.718, 1.718, 1.724, 1.737, 1.752, 1.768, 1.776, 1.782, 1.784, 1.781, 1.766, 1.754, 1.737, 1.724, 1.713, 1.709,
- 1.716, 1.715, 1.716, 1.725, 1.737, 1.749, 1.756, 1.763, 1.764, 1.762, 1.749, 1.737, 1.724, 1.717, 1.709, 1.708,
- 1.715, 1.714, 1.712, 1.715, 1.722, 1.729, 1.736, 1.741, 1.742, 1.739, 1.731, 1.723, 1.717, 1.712, 1.711, 1.709,
- 1.716, 1.714, 1.711, 1.712, 1.715, 1.719, 1.723, 1.728, 1.731, 1.729, 1.723, 1.718, 1.711, 1.711, 1.713, 1.713
- ]
- },
- {
- "ct": 6000, "table":
- [
- 1.374, 1.372, 1.373, 1.374, 1.375, 1.378, 1.378, 1.381, 1.382, 1.382, 1.378, 1.373, 1.372, 1.369, 1.365, 1.365,
- 1.371, 1.371, 1.372, 1.374, 1.378, 1.381, 1.384, 1.386, 1.388, 1.387, 1.384, 1.377, 1.372, 1.368, 1.364, 1.362,
- 1.369, 1.371, 1.372, 1.377, 1.383, 1.391, 1.394, 1.396, 1.397, 1.395, 1.391, 1.382, 1.374, 1.369, 1.362, 1.361,
- 1.369, 1.371, 1.375, 1.383, 1.391, 1.399, 1.402, 1.404, 1.405, 1.403, 1.398, 1.391, 1.379, 1.371, 1.363, 1.361,
- 1.371, 1.373, 1.378, 1.388, 1.399, 1.407, 1.411, 1.413, 1.413, 1.411, 1.405, 1.397, 1.385, 1.374, 1.366, 1.362,
- 1.371, 1.374, 1.379, 1.389, 1.405, 1.411, 1.414, 1.414, 1.415, 1.415, 1.411, 1.401, 1.388, 1.376, 1.367, 1.363,
- 1.371, 1.373, 1.379, 1.389, 1.405, 1.408, 1.413, 1.414, 1.414, 1.413, 1.409, 1.401, 1.388, 1.376, 1.367, 1.362,
- 1.366, 1.369, 1.374, 1.384, 1.396, 1.404, 1.407, 1.408, 1.408, 1.408, 1.401, 1.395, 1.382, 1.371, 1.363, 1.359,
- 1.364, 1.365, 1.368, 1.375, 1.386, 1.396, 1.399, 1.401, 1.399, 1.399, 1.395, 1.385, 1.374, 1.365, 1.359, 1.357,
- 1.361, 1.363, 1.365, 1.368, 1.377, 1.384, 1.388, 1.391, 1.391, 1.388, 1.385, 1.375, 1.366, 1.361, 1.358, 1.356,
- 1.361, 1.362, 1.362, 1.364, 1.367, 1.373, 1.376, 1.377, 1.377, 1.375, 1.373, 1.366, 1.362, 1.358, 1.358, 1.358,
- 1.361, 1.362, 1.362, 1.362, 1.363, 1.367, 1.369, 1.368, 1.367, 1.367, 1.367, 1.364, 1.358, 1.357, 1.358, 1.359
- ]
- }
- ],
- "luminance_lut":
- [
- 2.716, 2.568, 2.299, 2.065, 1.845, 1.693, 1.605, 1.597, 1.596, 1.634, 1.738, 1.914, 2.145, 2.394, 2.719, 2.901,
- 2.593, 2.357, 2.093, 1.876, 1.672, 1.528, 1.438, 1.393, 1.394, 1.459, 1.569, 1.731, 1.948, 2.169, 2.481, 2.756,
- 2.439, 2.197, 1.922, 1.691, 1.521, 1.365, 1.266, 1.222, 1.224, 1.286, 1.395, 1.573, 1.747, 1.988, 2.299, 2.563,
- 2.363, 2.081, 1.797, 1.563, 1.376, 1.244, 1.152, 1.099, 1.101, 1.158, 1.276, 1.421, 1.607, 1.851, 2.163, 2.455,
- 2.342, 2.003, 1.715, 1.477, 1.282, 1.152, 1.074, 1.033, 1.035, 1.083, 1.163, 1.319, 1.516, 1.759, 2.064, 2.398,
- 2.342, 1.985, 1.691, 1.446, 1.249, 1.111, 1.034, 1.004, 1.004, 1.028, 1.114, 1.274, 1.472, 1.716, 2.019, 2.389,
- 2.342, 1.991, 1.691, 1.446, 1.249, 1.112, 1.034, 1.011, 1.005, 1.035, 1.114, 1.274, 1.472, 1.716, 2.019, 2.389,
- 2.365, 2.052, 1.751, 1.499, 1.299, 1.171, 1.089, 1.039, 1.042, 1.084, 1.162, 1.312, 1.516, 1.761, 2.059, 2.393,
- 2.434, 2.159, 1.856, 1.601, 1.403, 1.278, 1.166, 1.114, 1.114, 1.162, 1.266, 1.402, 1.608, 1.847, 2.146, 2.435,
- 2.554, 2.306, 2.002, 1.748, 1.563, 1.396, 1.299, 1.247, 1.243, 1.279, 1.386, 1.551, 1.746, 1.977, 2.272, 2.518,
- 2.756, 2.493, 2.195, 1.947, 1.739, 1.574, 1.481, 1.429, 1.421, 1.457, 1.559, 1.704, 1.929, 2.159, 2.442, 2.681,
- 2.935, 2.739, 2.411, 2.151, 1.922, 1.749, 1.663, 1.628, 1.625, 1.635, 1.716, 1.872, 2.113, 2.368, 2.663, 2.824
- ],
- "sigma": 0.00381,
- "sigma_Cb": 0.00216
- },
- "rpi.contrast":
- {
- "ce_enable": 1,
- "gamma_curve":
- [
- 0, 0, 1024, 5040, 2048, 9338, 3072, 12356, 4096, 15312, 5120, 18051, 6144, 20790, 7168, 23193,
- 8192, 25744, 9216, 27942, 10240, 30035, 11264, 32005, 12288, 33975, 13312, 35815, 14336, 37600, 15360, 39168,
- 16384, 40642, 18432, 43379, 20480, 45749, 22528, 47753, 24576, 49621, 26624, 51253, 28672, 52698, 30720, 53796,
- 32768, 54876, 36864, 57012, 40960, 58656, 45056, 59954, 49152, 61183, 53248, 62355, 57344, 63419, 61440, 64476,
- 65535, 65535
- ]
- },
- "rpi.ccm":
- {
- "ccms":
- [
- {
- "ct": 2498, "ccm":
- [
- 1.58731, -0.18011, -0.40721, -0.60639, 2.03422, -0.42782, -0.19612, -1.69203, 2.88815
- ]
- },
- {
- "ct": 2811, "ccm":
- [
- 1.61593, -0.33164, -0.28429, -0.55048, 1.97779, -0.42731, -0.12042, -1.42847, 2.54889
- ]
- },
- {
- "ct": 2911, "ccm":
- [
- 1.62771, -0.41282, -0.21489, -0.57991, 2.04176, -0.46186, -0.07613, -1.13359, 2.20972
- ]
- },
- {
- "ct": 2919, "ccm":
- [
- 1.62661, -0.37736, -0.24925, -0.52519, 1.95233, -0.42714, -0.10842, -1.34929, 2.45771
- ]
- },
- {
- "ct": 3627, "ccm":
- [
- 1.70385, -0.57231, -0.13154, -0.47763, 1.85998, -0.38235, -0.07467, -0.82678, 1.90145
- ]
- },
- {
- "ct": 4600, "ccm":
- [
- 1.68486, -0.61085, -0.07402, -0.41927, 2.04016, -0.62089, -0.08633, -0.67672, 1.76305
- ]
- },
- {
- "ct": 5716, "ccm":
- [
- 1.80439, -0.73699, -0.06739, -0.36073, 1.83327, -0.47255, -0.08378, -0.56403, 1.64781
- ]
- },
- {
- "ct": 8575, "ccm":
- [
- 1.89357, -0.76427, -0.12931, -0.27399, 2.15605, -0.88206, -0.12035, -0.68256, 1.80292
- ]
- }
- ]
- },
- "rpi.sharpen":
- {
-
- },
- "rpi.dpc":
- {
-
- }
-}
diff --git a/src/ipa/raspberrypi/data/imx290.json b/src/ipa/raspberrypi/data/imx290.json
deleted file mode 100644
index 20b45c16..00000000
--- a/src/ipa/raspberrypi/data/imx290.json
+++ /dev/null
@@ -1,165 +0,0 @@
-{
- "rpi.black_level":
- {
- "black_level": 3840
- },
- "rpi.dpc":
- {
- },
- "rpi.lux":
- {
- "reference_shutter_speed": 6813,
- "reference_gain": 1.0,
- "reference_aperture": 1.0,
- "reference_lux": 890,
- "reference_Y": 12900
- },
- "rpi.noise":
- {
- "reference_constant": 0,
- "reference_slope": 2.67
- },
- "rpi.geq":
- {
- "offset": 187,
- "slope": 0.00842
- },
- "rpi.sdn":
- {
- },
- "rpi.awb":
- {
- "bayes": 0
- },
- "rpi.agc":
- {
- "speed": 0.2,
- "metering_modes":
- {
- "matrix":
- {
- "weights":
- [
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
- ]
- },
- "centre-weighted":
- {
- "weights":
- [
- 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
- ]
- },
- "spot":
- {
- "weights":
- [
- 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
- ]
- }
- },
- "exposure_modes":
- {
- "normal":
- {
- "shutter":
- [
- 10, 30000, 60000
- ],
- "gain":
- [
- 1.0, 2.0, 8.0
- ]
- },
- "sport":
- {
- "shutter":
- [
- 10, 5000, 10000, 20000, 120000
- ],
- "gain":
- [
- 1.0, 2.0, 4.0, 6.0, 8.0
- ]
- }
- },
- "constraint_modes":
- {
- "normal":
- [
- ],
- "highlight":
- [
- {
- "bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target":
- [
- 0, 0.5, 1000, 0.5
- ]
- },
- {
- "bound": "UPPER", "q_lo": 0.98, "q_hi": 1.0, "y_target":
- [
- 0, 0.8, 1000, 0.8
- ]
- }
- ]
- },
- "y_target":
- [
- 0, 0.16, 1000, 0.16, 10000, 0.16
- ]
- },
- "rpi.alsc":
- {
- "omega": 1.3,
- "n_iter": 100,
- "luminance_strength": 0.7,
- "luminance_lut":
- [
- 2.844, 2.349, 2.018, 1.775, 1.599, 1.466, 1.371, 1.321, 1.306, 1.316, 1.357, 1.439, 1.552, 1.705, 1.915, 2.221,
- 2.576, 2.151, 1.851, 1.639, 1.478, 1.358, 1.272, 1.231, 1.218, 1.226, 1.262, 1.335, 1.438, 1.571, 1.766, 2.067,
- 2.381, 2.005, 1.739, 1.545, 1.389, 1.278, 1.204, 1.166, 1.153, 1.161, 1.194, 1.263, 1.356, 1.489, 1.671, 1.943,
- 2.242, 1.899, 1.658, 1.481, 1.329, 1.225, 1.156, 1.113, 1.096, 1.107, 1.143, 1.201, 1.289, 1.423, 1.607, 1.861,
- 2.152, 1.831, 1.602, 1.436, 1.291, 1.193, 1.121, 1.069, 1.047, 1.062, 1.107, 1.166, 1.249, 1.384, 1.562, 1.801,
- 2.104, 1.795, 1.572, 1.407, 1.269, 1.174, 1.099, 1.041, 1.008, 1.029, 1.083, 1.146, 1.232, 1.364, 1.547, 1.766,
- 2.104, 1.796, 1.572, 1.403, 1.264, 1.171, 1.097, 1.036, 1.001, 1.025, 1.077, 1.142, 1.231, 1.363, 1.549, 1.766,
- 2.148, 1.827, 1.594, 1.413, 1.276, 1.184, 1.114, 1.062, 1.033, 1.049, 1.092, 1.153, 1.242, 1.383, 1.577, 1.795,
- 2.211, 1.881, 1.636, 1.455, 1.309, 1.214, 1.149, 1.104, 1.081, 1.089, 1.125, 1.184, 1.273, 1.423, 1.622, 1.846,
- 2.319, 1.958, 1.698, 1.516, 1.362, 1.262, 1.203, 1.156, 1.137, 1.142, 1.171, 1.229, 1.331, 1.484, 1.682, 1.933,
- 2.459, 2.072, 1.789, 1.594, 1.441, 1.331, 1.261, 1.219, 1.199, 1.205, 1.232, 1.301, 1.414, 1.571, 1.773, 2.052,
- 2.645, 2.206, 1.928, 1.728, 1.559, 1.451, 1.352, 1.301, 1.282, 1.289, 1.319, 1.395, 1.519, 1.685, 1.904, 2.227
- ],
- "sigma": 0.005,
- "sigma_Cb": 0.005
- },
- "rpi.contrast":
- {
- "ce_enable": 1,
- "gamma_curve":
- [
- 0, 0, 1024, 5040, 2048, 9338, 3072, 12356, 4096, 15312, 5120, 18051, 6144, 20790, 7168, 23193,
- 8192, 25744, 9216, 27942, 10240, 30035, 11264, 32005, 12288, 33975, 13312, 35815, 14336, 37600, 15360, 39168,
- 16384, 40642, 18432, 43379, 20480, 45749, 22528, 47753, 24576, 49621, 26624, 51253, 28672, 52698, 30720, 53796,
- 32768, 54876, 36864, 57012, 40960, 58656, 45056, 59954, 49152, 61183, 53248, 62355, 57344, 63419, 61440, 64476,
- 65535, 65535
- ]
- },
- "rpi.sharpen":
- {
- },
- "rpi.ccm":
- {
- "ccms":
- [
- {
- "ct": 3900, "ccm":
- [
- 1.54659, -0.17707, -0.36953, -0.51471, 1.72733, -0.21262, 0.06667, -0.92279, 1.85612
- ]
- }
- ]
- },
- "rpi.focus":
- {
- }
-}
diff --git a/src/ipa/raspberrypi/data/imx296.json b/src/ipa/raspberrypi/data/imx296.json
deleted file mode 100644
index 837feff5..00000000
--- a/src/ipa/raspberrypi/data/imx296.json
+++ /dev/null
@@ -1,191 +0,0 @@
-{
- "rpi.black_level":
- {
- "black_level": 4096
- },
- "rpi.dpc":
- {
- },
- "rpi.lux":
- {
- "reference_shutter_speed": 19184,
- "reference_gain": 1.0,
- "reference_aperture": 1.0,
- "reference_lux": 432,
- "reference_Y": 13773
- },
- "rpi.noise":
- {
- "reference_constant": 0,
- "reference_slope": 2.957
- },
- "rpi.geq":
- {
- "offset": 185,
- "slope": 0.0105
- },
- "rpi.sdn":
- {
- },
- "rpi.agc":
- {
- "metering_modes":
- {
- "centre-weighted":
- {
- "weights":
- [
- 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
- ]
- },
- "spot":
- {
- "weights":
- [
- 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
- ]
- },
- "matrix":
- {
- "weights":
- [
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
- ]
- }
- },
- "exposure_modes":
- {
- "normal":
- {
- "shutter":
- [
- 100, 10000, 30000, 60000, 120000
- ],
- "gain":
- [
- 1.0, 2.0, 4.0, 6.0, 6.0
- ]
- },
- "short":
- {
- "shutter":
- [
- 100, 5000, 10000, 20000, 120000
- ],
- "gain":
- [
- 1.0, 2.0, 4.0, 6.0, 6.0
- ]
- }
- },
- "constraint_modes":
- {
- "normal":
- [
- {
- "bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target":
- [
- 0, 0.5, 1000, 0.5
- ]
- }
- ],
- "highlight":
- [
- {
- "bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target":
- [
- 0, 0.5, 1000, 0.5
- ]
- },
- {
- "bound": "UPPER", "q_lo": 0.98, "q_hi": 1.0, "y_target":
- [
- 0, 0.8, 1000, 0.8
- ]
- }
- ]
- },
- "y_target":
- [
- 0, 0.16, 1000, 0.165, 10000, 0.17
- ]
- },
- "rpi.alsc":
- {
- "omega": 1.3,
- "n_iter": 0,
- "luminance_strength": 0.5,
- "calibrations_Cr":
- [
- {
- "ct": 4000, "table":
- [
- 2.554, 2.554, 2.541, 2.534, 2.495, 2.506, 2.516, 2.517, 2.518, 2.515, 2.513, 2.495, 2.481, 2.533, 2.533, 2.521,
- 2.522, 2.534, 2.539, 2.531, 2.531, 2.506, 2.506, 2.513, 2.513, 2.509, 2.498, 2.496, 2.508, 2.517, 2.521, 2.521,
- 2.509, 2.517, 2.534, 2.529, 2.531, 2.521, 2.517, 2.517, 2.515, 2.514, 2.506, 2.499, 2.508, 2.508, 2.521, 2.537,
- 2.507, 2.508, 2.517, 2.516, 2.495, 2.487, 2.519, 2.534, 2.535, 2.531, 2.499, 2.494, 2.501, 2.511, 2.526, 2.526,
- 2.509, 2.517, 2.507, 2.501, 2.494, 2.519, 2.539, 2.539, 2.537, 2.537, 2.533, 2.499, 2.503, 2.511, 2.529, 2.525,
- 2.521, 2.522, 2.476, 2.501, 2.501, 2.539, 2.546, 2.538, 2.531, 2.538, 2.541, 2.531, 2.529, 2.526, 2.529, 2.525,
- 2.516, 2.519, 2.469, 2.499, 2.499, 2.543, 2.543, 2.531, 2.528, 2.534, 2.541, 2.535, 2.531, 2.526, 2.531, 2.528,
- 2.509, 2.515, 2.465, 2.487, 2.487, 2.539, 2.543, 2.539, 2.533, 2.549, 2.542, 2.531, 2.529, 2.524, 2.532, 2.533,
- 2.499, 2.499, 2.475, 2.482, 2.471, 2.509, 2.539, 2.544, 2.543, 2.545, 2.533, 2.498, 2.521, 2.521, 2.537, 2.536,
- 2.499, 2.488, 2.488, 2.488, 2.471, 2.462, 2.509, 2.539, 2.539, 2.532, 2.498, 2.498, 2.518, 2.518, 2.539, 2.539,
- 2.483, 2.484, 2.488, 2.488, 2.502, 2.496, 2.508, 2.514, 2.518, 2.517, 2.521, 2.518, 2.518, 2.518, 2.525, 2.539,
- 2.483, 2.487, 2.478, 2.478, 2.507, 2.509, 2.514, 2.513, 2.514, 2.517, 2.536, 2.559, 2.501, 2.501, 2.503, 2.525
- ]
- }
- ],
- "calibrations_Cb":
- [
- {
- "ct": 4000, "table":
- [
- 2.619, 2.603, 2.599, 2.597, 2.595, 2.594, 2.589, 2.587, 2.586, 2.589, 2.592, 2.597, 2.601, 2.608, 2.621, 2.621,
- 2.619, 2.615, 2.603, 2.601, 2.596, 2.595, 2.591, 2.589, 2.589, 2.592, 2.599, 2.593, 2.601, 2.613, 2.622, 2.631,
- 2.617, 2.617, 2.612, 2.611, 2.604, 2.598, 2.593, 2.591, 2.592, 2.591, 2.593, 2.595, 2.599, 2.614, 2.623, 2.631,
- 2.624, 2.619, 2.615, 2.612, 2.605, 2.602, 2.597, 2.596, 2.592, 2.592, 2.595, 2.599, 2.602, 2.606, 2.619, 2.624,
- 2.629, 2.627, 2.627, 2.617, 2.609, 2.598, 2.612, 2.623, 2.615, 2.604, 2.589, 2.595, 2.599, 2.608, 2.611, 2.614,
- 2.629, 2.632, 2.637, 2.627, 2.612, 2.612, 2.629, 2.631, 2.628, 2.621, 2.604, 2.597, 2.598, 2.604, 2.609, 2.609,
- 2.635, 2.636, 2.642, 2.628, 2.623, 2.623, 2.636, 2.636, 2.634, 2.628, 2.616, 2.599, 2.597, 2.601, 2.603, 2.601,
- 2.641, 2.639, 2.646, 2.632, 2.627, 2.625, 2.632, 2.635, 2.634, 2.627, 2.614, 2.596, 2.595, 2.599, 2.599, 2.598,
- 2.643, 2.644, 2.651, 2.649, 2.629, 2.617, 2.624, 2.629, 2.625, 2.614, 2.586, 2.599, 2.595, 2.597, 2.592, 2.595,
- 2.645, 2.646, 2.649, 2.649, 2.638, 2.624, 2.616, 2.617, 2.609, 2.604, 2.603, 2.603, 2.595, 2.589, 2.587, 2.592,
- 2.641, 2.643, 2.649, 2.647, 2.638, 2.618, 2.615, 2.608, 2.602, 2.595, 2.596, 2.595, 2.593, 2.584, 2.581, 2.583,
- 2.638, 2.637, 2.647, 2.634, 2.634, 2.618, 2.621, 2.621, 2.611, 2.602, 2.596, 2.583, 2.581, 2.581, 2.576, 2.574
- ]
- }
- ],
- "luminance_lut":
- [
- 1.308, 1.293, 1.228, 1.175, 1.139, 1.108, 1.092, 1.082, 1.082, 1.086, 1.097, 1.114, 1.149, 1.199, 1.279, 1.303,
- 1.293, 1.249, 1.199, 1.162, 1.136, 1.109, 1.087, 1.077, 1.072, 1.081, 1.095, 1.103, 1.133, 1.172, 1.225, 1.282,
- 1.251, 1.212, 1.186, 1.159, 1.129, 1.114, 1.102, 1.088, 1.088, 1.088, 1.095, 1.117, 1.123, 1.158, 1.198, 1.249,
- 1.223, 1.192, 1.177, 1.163, 1.147, 1.139, 1.132, 1.112, 1.111, 1.107, 1.113, 1.118, 1.139, 1.155, 1.186, 1.232,
- 1.207, 1.186, 1.171, 1.162, 1.168, 1.163, 1.153, 1.138, 1.129, 1.128, 1.132, 1.136, 1.149, 1.167, 1.189, 1.216,
- 1.198, 1.186, 1.176, 1.176, 1.177, 1.185, 1.171, 1.157, 1.146, 1.144, 1.146, 1.149, 1.161, 1.181, 1.201, 1.221,
- 1.203, 1.181, 1.176, 1.178, 1.191, 1.189, 1.188, 1.174, 1.159, 1.153, 1.158, 1.161, 1.169, 1.185, 1.211, 1.227,
- 1.211, 1.179, 1.177, 1.187, 1.194, 1.196, 1.194, 1.187, 1.176, 1.169, 1.171, 1.171, 1.175, 1.189, 1.214, 1.226,
- 1.219, 1.182, 1.184, 1.191, 1.195, 1.199, 1.197, 1.194, 1.188, 1.185, 1.179, 1.179, 1.182, 1.194, 1.212, 1.227,
- 1.237, 1.192, 1.194, 1.194, 1.198, 1.199, 1.198, 1.197, 1.196, 1.193, 1.189, 1.189, 1.192, 1.203, 1.214, 1.231,
- 1.282, 1.199, 1.199, 1.197, 1.199, 1.199, 1.192, 1.193, 1.193, 1.194, 1.196, 1.197, 1.206, 1.216, 1.228, 1.244,
- 1.309, 1.236, 1.204, 1.203, 1.202, 1.194, 1.194, 1.188, 1.192, 1.192, 1.199, 1.201, 1.212, 1.221, 1.235, 1.247
- ],
- "sigma": 0.005,
- "sigma_Cb": 0.005
- },
- "rpi.contrast":
- {
- "ce_enable": 1,
- "gamma_curve":
- [
- 0, 0, 1024, 5040, 2048, 9338, 3072, 12356, 4096, 15312, 5120, 18051, 6144, 20790, 7168, 23193,
- 8192, 25744, 9216, 27942, 10240, 30035, 11264, 32005, 12288, 33975, 13312, 35815, 14336, 37600, 15360, 39168,
- 16384, 40642, 18432, 43379, 20480, 45749, 22528, 47753, 24576, 49621, 26624, 51253, 28672, 52698, 30720, 53796,
- 32768, 54876, 36864, 57012, 40960, 58656, 45056, 59954, 49152, 61183, 53248, 62355, 57344, 63419, 61440, 64476,
- 65535, 65535
- ]
- },
- "rpi.sharpen":
- {
- }
-}
diff --git a/src/ipa/raspberrypi/data/imx378.json b/src/ipa/raspberrypi/data/imx378.json
deleted file mode 100644
index 66200345..00000000
--- a/src/ipa/raspberrypi/data/imx378.json
+++ /dev/null
@@ -1,338 +0,0 @@
-{
- "rpi.black_level":
- {
- "black_level": 4096
- },
- "rpi.dpc":
- {
- },
- "rpi.lux":
- {
- "reference_shutter_speed": 9999,
- "reference_gain": 1.95,
- "reference_aperture": 1.0,
- "reference_lux": 1000,
- "reference_Y": 12996
- },
- "rpi.noise":
- {
- "reference_constant": 0,
- "reference_slope": 2.641
- },
- "rpi.geq":
- {
- "offset": 235,
- "slope": 0.00902
- },
- "rpi.sdn":
- {
- },
- "rpi.awb":
- {
- "priors":
- [
- {
- "lux": 0, "prior":
- [
- 2000, 1.0, 3000, 0.0, 13000, 0.0
- ]
- },
- {
- "lux": 800, "prior":
- [
- 2000, 0.0, 6000, 2.0, 13000, 2.0
- ]
- },
- {
- "lux": 1500, "prior":
- [
- 2000, 0.0, 4000, 1.0, 6000, 6.0, 6500, 7.0, 7000, 1.0, 13000, 1.0
- ]
- }
- ],
- "modes":
- {
- "auto":
- {
- "lo": 2500,
- "hi": 8000
- },
- "incandescent":
- {
- "lo": 2500,
- "hi": 3000
- },
- "tungsten":
- {
- "lo": 3000,
- "hi": 3500
- },
- "fluorescent":
- {
- "lo": 4000,
- "hi": 4700
- },
- "indoor":
- {
- "lo": 3000,
- "hi": 5000
- },
- "daylight":
- {
- "lo": 5500,
- "hi": 6500
- },
- "cloudy":
- {
- "lo": 7000,
- "hi": 8100
- }
- },
- "bayes": 1,
- "ct_curve":
- [
- 2850.0, 0.6361, 0.3911, 3550.0, 0.5386, 0.5077, 4500.0, 0.4472, 0.6171, 5600.0, 0.3906, 0.6848, 8000.0, 0.3412, 0.7441
- ],
- "sensitivity_r": 1.0,
- "sensitivity_b": 1.0,
- "transverse_pos": 0.01667,
- "transverse_neg": 0.01195
- },
- "rpi.agc":
- {
- "metering_modes":
- {
- "centre-weighted":
- {
- "weights":
- [
- 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
- ]
- },
- "spot":
- {
- "weights":
- [
- 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
- ]
- },
- "matrix":
- {
- "weights":
- [
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
- ]
- }
- },
- "exposure_modes":
- {
- "normal":
- {
- "shutter":
- [
- 100, 10000, 30000, 60000, 120000
- ],
- "gain":
- [
- 1.0, 2.0, 4.0, 6.0, 8.0
- ]
- },
- "short":
- {
- "shutter":
- [
- 100, 5000, 10000, 20000, 120000
- ],
- "gain":
- [
- 1.0, 2.0, 4.0, 6.0, 8.0
- ]
- }
- },
- "constraint_modes":
- {
- "normal":
- [
- {
- "bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target":
- [
- 0, 0.5, 1000, 0.5
- ]
- }
- ],
- "highlight":
- [
- {
- "bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target":
- [
- 0, 0.5, 1000, 0.5
- ]
- },
- {
- "bound": "UPPER", "q_lo": 0.98, "q_hi": 1.0, "y_target":
- [
- 0, 0.8, 1000, 0.8
- ]
- }
- ]
- },
- "y_target":
- [
- 0, 0.16, 1000, 0.165, 10000, 0.17
- ]
- },
- "rpi.alsc":
- {
- "omega": 1.3,
- "n_iter": 100,
- "luminance_strength": 0.5,
- "calibrations_Cr":
- [
- {
- "ct": 2800, "table":
- [
- 1.604, 1.601, 1.593, 1.581, 1.568, 1.561, 1.561, 1.561, 1.561, 1.567, 1.582, 1.596, 1.609, 1.622, 1.632, 1.636,
- 1.601, 1.594, 1.586, 1.571, 1.555, 1.546, 1.543, 1.543, 1.547, 1.555, 1.572, 1.584, 1.599, 1.614, 1.625, 1.632,
- 1.599, 1.586, 1.571, 1.555, 1.542, 1.528, 1.518, 1.518, 1.523, 1.537, 1.555, 1.572, 1.589, 1.607, 1.622, 1.629,
- 1.597, 1.579, 1.561, 1.542, 1.528, 1.512, 1.493, 1.493, 1.499, 1.523, 1.537, 1.563, 1.582, 1.601, 1.619, 1.629,
- 1.597, 1.577, 1.557, 1.535, 1.512, 1.493, 1.481, 1.479, 1.492, 1.499, 1.524, 1.555, 1.578, 1.599, 1.619, 1.629,
- 1.597, 1.577, 1.557, 1.534, 1.508, 1.483, 1.476, 1.476, 1.481, 1.496, 1.522, 1.554, 1.578, 1.599, 1.619, 1.629,
- 1.597, 1.578, 1.557, 1.534, 1.508, 1.483, 1.481, 1.479, 1.481, 1.496, 1.522, 1.554, 1.579, 1.601, 1.619, 1.631,
- 1.597, 1.581, 1.562, 1.539, 1.517, 1.504, 1.483, 1.481, 1.496, 1.511, 1.531, 1.561, 1.585, 1.607, 1.623, 1.632,
- 1.601, 1.589, 1.569, 1.554, 1.539, 1.517, 1.504, 1.504, 1.511, 1.531, 1.553, 1.573, 1.596, 1.614, 1.629, 1.636,
- 1.609, 1.601, 1.586, 1.569, 1.554, 1.542, 1.535, 1.535, 1.541, 1.553, 1.573, 1.592, 1.608, 1.625, 1.637, 1.645,
- 1.617, 1.611, 1.601, 1.586, 1.574, 1.565, 1.564, 1.564, 1.571, 1.579, 1.592, 1.608, 1.622, 1.637, 1.646, 1.654,
- 1.619, 1.617, 1.611, 1.601, 1.588, 1.585, 1.585, 1.585, 1.588, 1.592, 1.607, 1.622, 1.637, 1.645, 1.654, 1.655
- ]
- },
- {
- "ct": 5500, "table":
- [
- 2.664, 2.658, 2.645, 2.629, 2.602, 2.602, 2.602, 2.606, 2.617, 2.628, 2.649, 2.677, 2.699, 2.722, 2.736, 2.747,
- 2.658, 2.653, 2.629, 2.605, 2.576, 2.575, 2.577, 2.592, 2.606, 2.618, 2.629, 2.651, 2.678, 2.707, 2.727, 2.741,
- 2.649, 2.631, 2.605, 2.576, 2.563, 2.552, 2.552, 2.557, 2.577, 2.604, 2.619, 2.641, 2.669, 2.698, 2.721, 2.741,
- 2.643, 2.613, 2.583, 2.563, 2.552, 2.531, 2.527, 2.527, 2.551, 2.577, 2.604, 2.638, 2.665, 2.694, 2.721, 2.741,
- 2.643, 2.606, 2.575, 2.558, 2.531, 2.516, 2.504, 2.516, 2.527, 2.551, 2.596, 2.635, 2.665, 2.694, 2.721, 2.741,
- 2.643, 2.606, 2.575, 2.558, 2.531, 2.503, 2.501, 2.502, 2.522, 2.551, 2.592, 2.635, 2.669, 2.696, 2.727, 2.744,
- 2.648, 2.611, 2.579, 2.558, 2.532, 2.511, 2.502, 2.511, 2.522, 2.552, 2.592, 2.642, 2.673, 2.702, 2.731, 2.752,
- 2.648, 2.619, 2.589, 2.571, 2.556, 2.532, 2.519, 2.522, 2.552, 2.568, 2.605, 2.648, 2.683, 2.715, 2.743, 2.758,
- 2.659, 2.637, 2.613, 2.589, 2.571, 2.556, 2.555, 2.555, 2.568, 2.605, 2.641, 2.671, 2.699, 2.729, 2.758, 2.776,
- 2.679, 2.665, 2.637, 2.613, 2.602, 2.599, 2.599, 2.606, 2.619, 2.641, 2.671, 2.698, 2.723, 2.754, 2.776, 2.787,
- 2.695, 2.684, 2.671, 2.646, 2.636, 2.636, 2.641, 2.648, 2.661, 2.681, 2.698, 2.723, 2.751, 2.776, 2.788, 2.803,
- 2.702, 2.699, 2.684, 2.671, 2.664, 2.664, 2.664, 2.668, 2.681, 2.698, 2.723, 2.751, 2.773, 2.788, 2.803, 2.805
- ]
- }
- ],
- "calibrations_Cb":
- [
- {
- "ct": 2800, "table":
- [
- 2.876, 2.868, 2.863, 2.851, 2.846, 2.846, 2.847, 2.851, 2.851, 2.857, 2.867, 2.875, 2.889, 2.899, 2.913, 2.926,
- 2.863, 2.861, 2.856, 2.846, 2.846, 2.847, 2.848, 2.851, 2.857, 2.859, 2.875, 2.882, 2.886, 2.896, 2.909, 2.917,
- 2.861, 2.856, 2.846, 2.841, 2.841, 2.855, 2.867, 2.875, 2.888, 2.888, 2.885, 2.883, 2.886, 2.889, 2.901, 2.913,
- 2.858, 2.851, 2.846, 2.846, 2.855, 2.867, 2.884, 2.895, 2.902, 2.902, 2.901, 2.891, 2.891, 2.894, 2.901, 2.909,
- 2.858, 2.851, 2.846, 2.846, 2.867, 2.884, 2.895, 2.902, 2.909, 2.915, 2.911, 2.901, 2.895, 2.898, 2.904, 2.909,
- 2.858, 2.851, 2.849, 2.853, 2.874, 2.888, 2.901, 2.909, 2.917, 2.922, 2.917, 2.911, 2.901, 2.899, 2.905, 2.908,
- 2.861, 2.855, 2.853, 2.855, 2.874, 2.888, 2.901, 2.913, 2.918, 2.922, 2.921, 2.911, 2.901, 2.901, 2.907, 2.908,
- 2.862, 2.859, 2.855, 2.856, 2.872, 2.885, 2.899, 2.906, 2.915, 2.917, 2.911, 2.907, 2.907, 2.907, 2.908, 2.909,
- 2.863, 2.863, 2.859, 2.864, 2.871, 2.881, 2.885, 2.899, 2.905, 2.905, 2.904, 2.904, 2.907, 2.909, 2.913, 2.913,
- 2.866, 2.865, 2.865, 2.867, 2.868, 2.872, 2.881, 2.885, 2.889, 2.894, 2.895, 2.902, 2.906, 2.913, 2.914, 2.917,
- 2.875, 2.875, 2.871, 2.871, 2.871, 2.871, 2.869, 2.869, 2.878, 2.889, 2.894, 2.895, 2.906, 2.914, 2.917, 2.921,
- 2.882, 2.879, 2.876, 2.874, 2.871, 2.871, 2.869, 2.869, 2.869, 2.878, 2.891, 2.894, 2.905, 2.914, 2.919, 2.921
- ]
- },
- {
- "ct": 5500, "table":
- [
- 1.488, 1.488, 1.488, 1.488, 1.491, 1.492, 1.492, 1.491, 1.491, 1.491, 1.492, 1.495, 1.497, 1.499, 1.499, 1.503,
- 1.482, 1.485, 1.485, 1.487, 1.489, 1.492, 1.492, 1.492, 1.492, 1.492, 1.494, 1.494, 1.492, 1.491, 1.493, 1.494,
- 1.482, 1.482, 1.484, 1.485, 1.487, 1.492, 1.496, 1.498, 1.499, 1.498, 1.494, 1.492, 1.491, 1.491, 1.491, 1.491,
- 1.481, 1.481, 1.482, 1.485, 1.491, 1.496, 1.498, 1.499, 1.501, 1.499, 1.498, 1.493, 1.491, 1.488, 1.488, 1.488,
- 1.481, 1.481, 1.481, 1.483, 1.491, 1.497, 1.498, 1.499, 1.501, 1.499, 1.498, 1.492, 1.488, 1.485, 1.483, 1.483,
- 1.479, 1.479, 1.481, 1.482, 1.489, 1.495, 1.497, 1.498, 1.499, 1.499, 1.495, 1.492, 1.485, 1.482, 1.482, 1.481,
- 1.479, 1.479, 1.479, 1.481, 1.489, 1.494, 1.496, 1.497, 1.497, 1.496, 1.495, 1.489, 1.482, 1.481, 1.479, 1.477,
- 1.478, 1.478, 1.479, 1.481, 1.487, 1.491, 1.494, 1.496, 1.496, 1.495, 1.492, 1.487, 1.482, 1.479, 1.478, 1.476,
- 1.478, 1.478, 1.479, 1.482, 1.486, 1.488, 1.491, 1.493, 1.493, 1.492, 1.487, 1.484, 1.481, 1.479, 1.476, 1.476,
- 1.477, 1.479, 1.481, 1.483, 1.485, 1.486, 1.488, 1.488, 1.487, 1.487, 1.484, 1.483, 1.481, 1.479, 1.476, 1.476,
- 1.477, 1.479, 1.482, 1.483, 1.484, 1.485, 1.484, 1.482, 1.482, 1.484, 1.483, 1.482, 1.481, 1.479, 1.477, 1.476,
- 1.477, 1.479, 1.482, 1.483, 1.484, 1.484, 1.482, 1.482, 1.482, 1.482, 1.482, 1.481, 1.479, 1.479, 1.479, 1.479
- ]
- }
- ],
- "luminance_lut":
- [
- 2.764, 2.654, 2.321, 2.043, 1.768, 1.594, 1.558, 1.558, 1.558, 1.568, 1.661, 1.904, 2.193, 2.497, 2.888, 3.043,
- 2.654, 2.373, 2.049, 1.819, 1.569, 1.446, 1.381, 1.356, 1.356, 1.403, 1.501, 1.679, 1.939, 2.218, 2.586, 2.888,
- 2.376, 2.154, 1.819, 1.569, 1.438, 1.301, 1.246, 1.224, 1.224, 1.263, 1.349, 1.501, 1.679, 1.985, 2.359, 2.609,
- 2.267, 1.987, 1.662, 1.438, 1.301, 1.235, 1.132, 1.105, 1.105, 1.164, 1.263, 1.349, 1.528, 1.808, 2.184, 2.491,
- 2.218, 1.876, 1.568, 1.367, 1.235, 1.132, 1.087, 1.022, 1.023, 1.104, 1.164, 1.278, 1.439, 1.695, 2.066, 2.429,
- 2.218, 1.832, 1.533, 1.341, 1.206, 1.089, 1.013, 1.002, 1.013, 1.026, 1.122, 1.246, 1.399, 1.642, 2.004, 2.426,
- 2.218, 1.832, 1.533, 1.341, 1.206, 1.089, 1.011, 1.001, 1.009, 1.026, 1.122, 1.246, 1.399, 1.642, 2.004, 2.426,
- 2.224, 1.896, 1.584, 1.382, 1.248, 1.147, 1.088, 1.016, 1.026, 1.118, 1.168, 1.283, 1.444, 1.697, 2.066, 2.428,
- 2.292, 2.019, 1.689, 1.462, 1.322, 1.247, 1.147, 1.118, 1.118, 1.168, 1.275, 1.358, 1.532, 1.809, 2.189, 2.491,
- 2.444, 2.204, 1.856, 1.606, 1.462, 1.322, 1.257, 1.234, 1.234, 1.275, 1.358, 1.516, 1.686, 1.993, 2.371, 2.622,
- 2.748, 2.444, 2.108, 1.856, 1.606, 1.476, 1.399, 1.376, 1.376, 1.422, 1.516, 1.686, 1.968, 2.238, 2.611, 2.935,
- 2.862, 2.748, 2.395, 2.099, 1.811, 1.621, 1.582, 1.582, 1.582, 1.592, 1.677, 1.919, 2.223, 2.534, 2.935, 3.078
- ],
- "sigma": 0.00428,
- "sigma_Cb": 0.00363
- },
- "rpi.contrast":
- {
- "ce_enable": 1,
- "gamma_curve":
- [
- 0, 0, 1024, 5040, 2048, 9338, 3072, 12356, 4096, 15312, 5120, 18051, 6144, 20790, 7168, 23193,
- 8192, 25744, 9216, 27942, 10240, 30035, 11264, 32005, 12288, 33975, 13312, 35815, 14336, 37600, 15360, 39168,
- 16384, 40642, 18432, 43379, 20480, 45749, 22528, 47753, 24576, 49621, 26624, 51253, 28672, 52698, 30720, 53796,
- 32768, 54876, 36864, 57012, 40960, 58656, 45056, 59954, 49152, 61183, 53248, 62355, 57344, 63419, 61440, 64476,
- 65535, 65535
- ]
- },
- "rpi.ccm":
- {
- "ccms":
- [
- {
- "ct": 2850, "ccm":
- [
- 1.42601, -0.20537, -0.22063, -0.47682, 1.81987, -0.34305, 0.01854, -0.86036, 1.84181
- ]
- },
- {
- "ct": 2900, "ccm":
- [
- 1.29755, 0.04602, -0.34356, -0.41491, 1.73477, -0.31987, -0.01345, -0.97115, 1.98459
- ]
- },
- {
- "ct": 3550, "ccm":
- [
- 1.49811, -0.33412, -0.16398, -0.40869, 1.72995, -0.32127, -0.01924, -0.62181, 1.64105
- ]
- },
- {
- "ct": 4500, "ccm":
- [
- 1.47015, -0.29229, -0.17786, -0.36561, 1.88919, -0.52358, -0.03552, -0.56717, 1.60269
- ]
- },
- {
- "ct": 5600, "ccm":
- [
- 1.60962, -0.47434, -0.13528, -0.32701, 1.73797, -0.41096, -0.07626, -0.40171, 1.47796
- ]
- },
- {
- "ct": 8000, "ccm":
- [
- 1.54642, -0.20396, -0.34246, -0.31748, 2.22559, -0.90811, -0.10035, -0.65877, 1.75912
- ]
- }
- ]
- },
- "rpi.sharpen":
- {
- }
-}
diff --git a/src/ipa/raspberrypi/data/imx477.json b/src/ipa/raspberrypi/data/imx477.json
deleted file mode 100644
index d07febd2..00000000
--- a/src/ipa/raspberrypi/data/imx477.json
+++ /dev/null
@@ -1,430 +0,0 @@
-{
- "rpi.black_level":
- {
- "black_level": 4096
- },
- "rpi.dpc":
- {
-
- },
- "rpi.lux":
- {
- "reference_shutter_speed": 27242,
- "reference_gain": 1.0,
- "reference_aperture": 1.0,
- "reference_lux": 830,
- "reference_Y": 17755
- },
- "rpi.noise":
- {
- "reference_constant": 0,
- "reference_slope": 2.767
- },
- "rpi.geq":
- {
- "offset": 204,
- "slope": 0.01078
- },
- "rpi.sdn":
- {
-
- },
- "rpi.awb":
- {
- "priors":
- [
- {
- "lux": 0, "prior":
- [
- 2000, 1.0, 3000, 0.0, 13000, 0.0
- ]
- },
- {
- "lux": 800, "prior":
- [
- 2000, 0.0, 6000, 2.0, 13000, 2.0
- ]
- },
- {
- "lux": 1500, "prior":
- [
- 2000, 0.0, 4000, 1.0, 6000, 6.0, 6500, 7.0, 7000, 1.0, 13000, 1.0
- ]
- }
- ],
- "modes":
- {
- "auto":
- {
- "lo": 2500,
- "hi": 8000
- },
- "incandescent":
- {
- "lo": 2500,
- "hi": 3000
- },
- "tungsten":
- {
- "lo": 3000,
- "hi": 3500
- },
- "fluorescent":
- {
- "lo": 4000,
- "hi": 4700
- },
- "indoor":
- {
- "lo": 3000,
- "hi": 5000
- },
- "daylight":
- {
- "lo": 5500,
- "hi": 6500
- },
- "cloudy":
- {
- "lo": 7000,
- "hi": 8600
- }
- },
- "bayes": 1,
- "ct_curve":
- [
- 2360.0, 0.6009, 0.3093, 2870.0, 0.5047, 0.3936, 2970.0, 0.4782, 0.4221, 3700.0, 0.4212, 0.4923, 3870.0, 0.4037, 0.5166, 4000.0,
- 0.3965, 0.5271, 4400.0, 0.3703, 0.5666, 4715.0, 0.3411, 0.6147, 5920.0, 0.3108, 0.6687, 9050.0, 0.2524, 0.7856
- ],
- "sensitivity_r": 1.05,
- "sensitivity_b": 1.05,
- "transverse_pos": 0.0238,
- "transverse_neg": 0.04429
- },
- "rpi.agc":
- {
- "metering_modes":
- {
- "centre-weighted":
- {
- "weights":
- [
- 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
- ]
- },
- "spot":
- {
- "weights":
- [
- 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
- ]
- },
- "matrix":
- {
- "weights":
- [
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
- ]
- }
- },
- "exposure_modes":
- {
- "normal":
- {
- "shutter":
- [
- 100, 10000, 30000, 60000, 66666
- ],
- "gain":
- [
- 1.0, 2.0, 4.0, 6.0, 8.0
- ]
- },
- "short":
- {
- "shutter":
- [
- 100, 5000, 10000, 20000, 33333
- ],
- "gain":
- [
- 1.0, 2.0, 4.0, 6.0, 8.0
- ]
- },
- "long":
- {
- "shutter":
- [
- 100, 10000, 30000, 60000, 120000
- ],
- "gain":
- [
- 1.0, 2.0, 4.0, 6.0, 12.0
- ]
- }
- },
- "constraint_modes":
- {
- "normal":
- [
- {
- "bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target":
- [
- 0, 0.3, 1000, 0.3
- ]
- }
- ],
- "highlight":
- [
- {
- "bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target":
- [
- 0, 0.3, 1000, 0.3
- ]
- },
- {
- "bound": "UPPER", "q_lo": 0.98, "q_hi": 1.0, "y_target":
- [
- 0, 0.8, 1000, 0.8
- ]
- }
- ],
- "shadows":
- [
- {
- "bound": "LOWER", "q_lo": 0.0, "q_hi": 0.5, "y_target":
- [
- 0, 0.17, 1000, 0.17
- ]
- }
- ]
- },
- "y_target":
- [
- 0, 0.16, 1000, 0.165, 10000, 0.17
- ]
- },
- "rpi.alsc":
- {
- "omega": 1.3,
- "n_iter": 100,
- "luminance_strength": 0.5,
- "calibrations_Cr":
- [
- {
- "ct": 2960, "table":
- [
- 2.088, 2.086, 2.082, 2.081, 2.077, 2.071, 2.068, 2.068, 2.072, 2.073, 2.075, 2.078, 2.084, 2.092, 2.095, 2.098,
- 2.086, 2.084, 2.079, 2.078, 2.075, 2.068, 2.064, 2.063, 2.068, 2.071, 2.072, 2.075, 2.081, 2.089, 2.092, 2.094,
- 2.083, 2.081, 2.077, 2.072, 2.069, 2.062, 2.059, 2.059, 2.063, 2.067, 2.069, 2.072, 2.079, 2.088, 2.089, 2.089,
- 2.081, 2.077, 2.072, 2.068, 2.065, 2.058, 2.055, 2.054, 2.057, 2.062, 2.066, 2.069, 2.077, 2.084, 2.086, 2.086,
- 2.078, 2.075, 2.069, 2.065, 2.061, 2.055, 2.052, 2.049, 2.051, 2.056, 2.062, 2.065, 2.072, 2.079, 2.081, 2.079,
- 2.079, 2.075, 2.069, 2.064, 2.061, 2.053, 2.049, 2.046, 2.049, 2.051, 2.057, 2.062, 2.069, 2.075, 2.077, 2.075,
- 2.082, 2.079, 2.072, 2.065, 2.061, 2.054, 2.049, 2.047, 2.049, 2.051, 2.056, 2.061, 2.066, 2.073, 2.073, 2.069,
- 2.086, 2.082, 2.075, 2.068, 2.062, 2.054, 2.051, 2.049, 2.051, 2.052, 2.056, 2.061, 2.066, 2.073, 2.073, 2.072,
- 2.088, 2.086, 2.079, 2.074, 2.066, 2.057, 2.051, 2.051, 2.054, 2.055, 2.056, 2.061, 2.067, 2.072, 2.073, 2.072,
- 2.091, 2.087, 2.079, 2.075, 2.068, 2.057, 2.052, 2.052, 2.056, 2.055, 2.055, 2.059, 2.066, 2.072, 2.072, 2.072,
- 2.093, 2.088, 2.081, 2.077, 2.069, 2.059, 2.054, 2.054, 2.057, 2.056, 2.056, 2.058, 2.066, 2.072, 2.073, 2.073,
- 2.095, 2.091, 2.084, 2.078, 2.075, 2.067, 2.057, 2.057, 2.059, 2.059, 2.058, 2.059, 2.068, 2.073, 2.075, 2.078
- ]
- },
- {
- "ct": 4850, "table":
- [
- 2.973, 2.968, 2.956, 2.943, 2.941, 2.932, 2.923, 2.921, 2.924, 2.929, 2.931, 2.939, 2.953, 2.965, 2.966, 2.976,
- 2.969, 2.962, 2.951, 2.941, 2.934, 2.928, 2.919, 2.918, 2.919, 2.923, 2.927, 2.933, 2.945, 2.957, 2.962, 2.962,
- 2.964, 2.956, 2.944, 2.932, 2.929, 2.924, 2.915, 2.914, 2.915, 2.919, 2.924, 2.928, 2.941, 2.952, 2.958, 2.959,
- 2.957, 2.951, 2.939, 2.928, 2.924, 2.919, 2.913, 2.911, 2.911, 2.915, 2.919, 2.925, 2.936, 2.947, 2.952, 2.953,
- 2.954, 2.947, 2.935, 2.924, 2.919, 2.915, 2.908, 2.906, 2.906, 2.907, 2.914, 2.921, 2.932, 2.941, 2.943, 2.942,
- 2.953, 2.946, 2.932, 2.921, 2.916, 2.911, 2.904, 2.902, 2.901, 2.904, 2.909, 2.919, 2.926, 2.937, 2.939, 2.939,
- 2.953, 2.947, 2.932, 2.918, 2.915, 2.909, 2.903, 2.901, 2.901, 2.906, 2.911, 2.918, 2.924, 2.936, 2.936, 2.932,
- 2.956, 2.948, 2.934, 2.919, 2.916, 2.908, 2.903, 2.901, 2.902, 2.907, 2.909, 2.917, 2.926, 2.936, 2.939, 2.939,
- 2.957, 2.951, 2.936, 2.923, 2.917, 2.907, 2.904, 2.901, 2.902, 2.908, 2.911, 2.919, 2.929, 2.939, 2.942, 2.942,
- 2.961, 2.951, 2.936, 2.922, 2.918, 2.906, 2.904, 2.901, 2.901, 2.907, 2.911, 2.921, 2.931, 2.941, 2.942, 2.944,
- 2.964, 2.954, 2.936, 2.924, 2.918, 2.909, 2.905, 2.905, 2.905, 2.907, 2.912, 2.923, 2.933, 2.942, 2.944, 2.944,
- 2.964, 2.958, 2.943, 2.927, 2.921, 2.914, 2.909, 2.907, 2.907, 2.912, 2.916, 2.928, 2.936, 2.944, 2.947, 2.952
- ]
- },
- {
- "ct": 5930, "table":
- [
- 3.312, 3.308, 3.301, 3.294, 3.288, 3.277, 3.268, 3.261, 3.259, 3.261, 3.267, 3.273, 3.285, 3.301, 3.303, 3.312,
- 3.308, 3.304, 3.294, 3.291, 3.283, 3.271, 3.263, 3.259, 3.257, 3.258, 3.261, 3.268, 3.278, 3.293, 3.299, 3.299,
- 3.302, 3.296, 3.288, 3.282, 3.276, 3.267, 3.259, 3.254, 3.252, 3.253, 3.256, 3.261, 3.273, 3.289, 3.292, 3.292,
- 3.296, 3.289, 3.282, 3.276, 3.269, 3.263, 3.256, 3.251, 3.248, 3.249, 3.251, 3.257, 3.268, 3.279, 3.284, 3.284,
- 3.292, 3.285, 3.279, 3.271, 3.264, 3.257, 3.249, 3.243, 3.241, 3.241, 3.246, 3.252, 3.261, 3.274, 3.275, 3.273,
- 3.291, 3.285, 3.276, 3.268, 3.259, 3.251, 3.242, 3.239, 3.236, 3.238, 3.244, 3.248, 3.258, 3.268, 3.269, 3.265,
- 3.294, 3.288, 3.275, 3.266, 3.257, 3.248, 3.239, 3.238, 3.237, 3.238, 3.243, 3.246, 3.255, 3.264, 3.264, 3.257,
- 3.297, 3.293, 3.279, 3.268, 3.258, 3.249, 3.238, 3.237, 3.239, 3.239, 3.243, 3.245, 3.255, 3.264, 3.264, 3.263,
- 3.301, 3.295, 3.281, 3.271, 3.259, 3.248, 3.237, 3.237, 3.239, 3.241, 3.243, 3.246, 3.257, 3.265, 3.266, 3.264,
- 3.306, 3.295, 3.279, 3.271, 3.261, 3.247, 3.235, 3.234, 3.239, 3.239, 3.243, 3.247, 3.258, 3.265, 3.265, 3.264,
- 3.308, 3.297, 3.279, 3.272, 3.261, 3.249, 3.239, 3.239, 3.241, 3.243, 3.245, 3.248, 3.261, 3.265, 3.266, 3.265,
- 3.309, 3.301, 3.286, 3.276, 3.267, 3.256, 3.246, 3.242, 3.244, 3.244, 3.249, 3.253, 3.263, 3.267, 3.271, 3.274
- ]
- }
- ],
- "calibrations_Cb":
- [
- {
- "ct": 2960, "table":
- [
- 2.133, 2.134, 2.139, 2.143, 2.148, 2.155, 2.158, 2.158, 2.158, 2.161, 2.161, 2.162, 2.159, 2.156, 2.152, 2.151,
- 2.132, 2.133, 2.135, 2.142, 2.147, 2.153, 2.158, 2.158, 2.158, 2.158, 2.159, 2.159, 2.157, 2.154, 2.151, 2.148,
- 2.133, 2.133, 2.135, 2.142, 2.149, 2.154, 2.158, 2.158, 2.157, 2.156, 2.158, 2.157, 2.155, 2.153, 2.148, 2.146,
- 2.133, 2.133, 2.138, 2.145, 2.149, 2.154, 2.158, 2.159, 2.158, 2.155, 2.157, 2.156, 2.153, 2.149, 2.146, 2.144,
- 2.133, 2.134, 2.139, 2.146, 2.149, 2.154, 2.158, 2.159, 2.159, 2.156, 2.154, 2.154, 2.149, 2.145, 2.143, 2.139,
- 2.135, 2.135, 2.139, 2.146, 2.151, 2.155, 2.158, 2.159, 2.158, 2.156, 2.153, 2.151, 2.146, 2.143, 2.139, 2.136,
- 2.135, 2.135, 2.138, 2.145, 2.151, 2.154, 2.157, 2.158, 2.157, 2.156, 2.153, 2.151, 2.147, 2.143, 2.141, 2.137,
- 2.135, 2.134, 2.135, 2.141, 2.149, 2.154, 2.157, 2.157, 2.157, 2.157, 2.157, 2.153, 2.149, 2.146, 2.142, 2.139,
- 2.132, 2.133, 2.135, 2.139, 2.148, 2.153, 2.158, 2.159, 2.159, 2.161, 2.161, 2.157, 2.154, 2.149, 2.144, 2.141,
- 2.132, 2.133, 2.135, 2.141, 2.149, 2.155, 2.161, 2.161, 2.162, 2.162, 2.163, 2.159, 2.154, 2.149, 2.144, 2.138,
- 2.136, 2.136, 2.137, 2.143, 2.149, 2.156, 2.162, 2.163, 2.162, 2.163, 2.164, 2.161, 2.157, 2.152, 2.146, 2.138,
- 2.137, 2.137, 2.141, 2.147, 2.152, 2.157, 2.162, 2.162, 2.159, 2.161, 2.162, 2.162, 2.157, 2.152, 2.148, 2.148
- ]
- },
- {
- "ct": 4850, "table":
- [
- 1.463, 1.464, 1.471, 1.478, 1.479, 1.483, 1.484, 1.486, 1.486, 1.484, 1.483, 1.481, 1.478, 1.475, 1.471, 1.468,
- 1.463, 1.463, 1.468, 1.476, 1.479, 1.482, 1.484, 1.487, 1.486, 1.484, 1.483, 1.482, 1.478, 1.473, 1.469, 1.468,
- 1.463, 1.464, 1.468, 1.476, 1.479, 1.483, 1.484, 1.486, 1.486, 1.485, 1.484, 1.482, 1.477, 1.473, 1.469, 1.468,
- 1.463, 1.464, 1.469, 1.477, 1.481, 1.483, 1.485, 1.487, 1.487, 1.485, 1.485, 1.482, 1.478, 1.474, 1.469, 1.468,
- 1.465, 1.465, 1.471, 1.478, 1.481, 1.484, 1.486, 1.488, 1.488, 1.487, 1.485, 1.482, 1.477, 1.472, 1.468, 1.467,
- 1.465, 1.466, 1.472, 1.479, 1.482, 1.485, 1.486, 1.488, 1.488, 1.486, 1.484, 1.479, 1.475, 1.472, 1.468, 1.466,
- 1.466, 1.466, 1.472, 1.478, 1.482, 1.484, 1.485, 1.488, 1.487, 1.485, 1.483, 1.479, 1.475, 1.472, 1.469, 1.468,
- 1.465, 1.466, 1.469, 1.476, 1.481, 1.485, 1.485, 1.486, 1.486, 1.485, 1.483, 1.479, 1.477, 1.474, 1.471, 1.469,
- 1.464, 1.465, 1.469, 1.476, 1.481, 1.484, 1.485, 1.487, 1.487, 1.486, 1.485, 1.481, 1.478, 1.475, 1.471, 1.469,
- 1.463, 1.464, 1.469, 1.477, 1.481, 1.485, 1.485, 1.488, 1.488, 1.487, 1.486, 1.481, 1.478, 1.475, 1.471, 1.468,
- 1.464, 1.465, 1.471, 1.478, 1.482, 1.486, 1.486, 1.488, 1.488, 1.487, 1.486, 1.481, 1.478, 1.475, 1.472, 1.468,
- 1.465, 1.466, 1.472, 1.481, 1.483, 1.487, 1.487, 1.488, 1.488, 1.486, 1.485, 1.481, 1.479, 1.476, 1.473, 1.472
- ]
- },
- {
- "ct": 5930, "table":
- [
- 1.443, 1.444, 1.448, 1.453, 1.459, 1.463, 1.465, 1.467, 1.469, 1.469, 1.467, 1.466, 1.462, 1.457, 1.454, 1.451,
- 1.443, 1.444, 1.445, 1.451, 1.459, 1.463, 1.465, 1.467, 1.469, 1.469, 1.467, 1.465, 1.461, 1.456, 1.452, 1.451,
- 1.444, 1.444, 1.445, 1.451, 1.459, 1.463, 1.466, 1.468, 1.469, 1.469, 1.467, 1.465, 1.461, 1.456, 1.452, 1.449,
- 1.444, 1.444, 1.447, 1.452, 1.459, 1.464, 1.467, 1.469, 1.471, 1.469, 1.467, 1.466, 1.461, 1.456, 1.452, 1.449,
- 1.444, 1.445, 1.448, 1.452, 1.459, 1.465, 1.469, 1.471, 1.471, 1.471, 1.468, 1.465, 1.461, 1.455, 1.451, 1.449,
- 1.445, 1.446, 1.449, 1.453, 1.461, 1.466, 1.469, 1.471, 1.472, 1.469, 1.467, 1.465, 1.459, 1.455, 1.451, 1.447,
- 1.446, 1.446, 1.449, 1.453, 1.461, 1.466, 1.469, 1.469, 1.469, 1.469, 1.467, 1.465, 1.459, 1.455, 1.452, 1.449,
- 1.446, 1.446, 1.447, 1.451, 1.459, 1.466, 1.469, 1.469, 1.469, 1.469, 1.467, 1.465, 1.461, 1.457, 1.454, 1.451,
- 1.444, 1.444, 1.447, 1.451, 1.459, 1.466, 1.469, 1.469, 1.471, 1.471, 1.468, 1.466, 1.462, 1.458, 1.454, 1.452,
- 1.444, 1.444, 1.448, 1.453, 1.459, 1.466, 1.469, 1.471, 1.472, 1.472, 1.468, 1.466, 1.462, 1.458, 1.454, 1.449,
- 1.446, 1.447, 1.449, 1.454, 1.461, 1.466, 1.471, 1.471, 1.471, 1.471, 1.468, 1.466, 1.462, 1.459, 1.455, 1.449,
- 1.447, 1.447, 1.452, 1.457, 1.462, 1.468, 1.472, 1.472, 1.471, 1.471, 1.468, 1.466, 1.462, 1.459, 1.456, 1.455
- ]
- }
- ],
- "luminance_lut":
- [
- 1.548, 1.499, 1.387, 1.289, 1.223, 1.183, 1.164, 1.154, 1.153, 1.169, 1.211, 1.265, 1.345, 1.448, 1.581, 1.619,
- 1.513, 1.412, 1.307, 1.228, 1.169, 1.129, 1.105, 1.098, 1.103, 1.127, 1.157, 1.209, 1.272, 1.361, 1.481, 1.583,
- 1.449, 1.365, 1.257, 1.175, 1.124, 1.085, 1.062, 1.054, 1.059, 1.079, 1.113, 1.151, 1.211, 1.293, 1.407, 1.488,
- 1.424, 1.324, 1.222, 1.139, 1.089, 1.056, 1.034, 1.031, 1.034, 1.049, 1.075, 1.115, 1.164, 1.241, 1.351, 1.446,
- 1.412, 1.297, 1.203, 1.119, 1.069, 1.039, 1.021, 1.016, 1.022, 1.032, 1.052, 1.086, 1.135, 1.212, 1.321, 1.439,
- 1.406, 1.287, 1.195, 1.115, 1.059, 1.028, 1.014, 1.012, 1.015, 1.026, 1.041, 1.074, 1.125, 1.201, 1.302, 1.425,
- 1.406, 1.294, 1.205, 1.126, 1.062, 1.031, 1.013, 1.009, 1.011, 1.019, 1.042, 1.079, 1.129, 1.203, 1.302, 1.435,
- 1.415, 1.318, 1.229, 1.146, 1.076, 1.039, 1.019, 1.014, 1.017, 1.031, 1.053, 1.093, 1.144, 1.219, 1.314, 1.436,
- 1.435, 1.348, 1.246, 1.164, 1.094, 1.059, 1.036, 1.032, 1.037, 1.049, 1.072, 1.114, 1.167, 1.257, 1.343, 1.462,
- 1.471, 1.385, 1.278, 1.189, 1.124, 1.084, 1.064, 1.061, 1.069, 1.078, 1.101, 1.146, 1.207, 1.298, 1.415, 1.496,
- 1.522, 1.436, 1.323, 1.228, 1.169, 1.118, 1.101, 1.094, 1.099, 1.113, 1.146, 1.194, 1.265, 1.353, 1.474, 1.571,
- 1.578, 1.506, 1.378, 1.281, 1.211, 1.156, 1.135, 1.134, 1.139, 1.158, 1.194, 1.251, 1.327, 1.427, 1.559, 1.611
- ],
- "sigma": 0.00121,
- "sigma_Cb": 0.00115
- },
- "rpi.contrast":
- {
- "ce_enable": 1,
- "gamma_curve":
- [
- 0, 0, 1024, 5040, 2048, 9338, 3072, 12356, 4096, 15312, 5120, 18051, 6144, 20790, 7168, 23193,
- 8192, 25744, 9216, 27942, 10240, 30035, 11264, 32005, 12288, 33975, 13312, 35815, 14336, 37600, 15360, 39168,
- 16384, 40642, 18432, 43379, 20480, 45749, 22528, 47753, 24576, 49621, 26624, 51253, 28672, 52698, 30720, 53796,
- 32768, 54876, 36864, 57012, 40960, 58656, 45056, 59954, 49152, 61183, 53248, 62355, 57344, 63419, 61440, 64476,
- 65535, 65535
- ]
- },
- "rpi.ccm":
- {
- "ccms":
- [
- {
- "ct": 2360, "ccm":
- [
- 1.66078, -0.23588, -0.42491, -0.47456, 1.82763, -0.35307, -0.00545, -1.44729, 2.45273
- ]
- },
- {
- "ct": 2870, "ccm":
- [
- 1.78373, -0.55344, -0.23029, -0.39951, 1.69701, -0.29751, 0.01986, -1.06525, 2.04539
- ]
- },
- {
- "ct": 2970, "ccm":
- [
- 1.73511, -0.56973, -0.16537, -0.36338, 1.69878, -0.33539, -0.02354, -0.76813, 1.79168
- ]
- },
- {
- "ct": 3000, "ccm":
- [
- 2.06374, -0.92218, -0.14156, -0.41721, 1.69289, -0.27568, -0.00554, -0.92741, 1.93295
- ]
- },
- {
- "ct": 3700, "ccm":
- [
- 2.13792, -1.08136, -0.05655, -0.34739, 1.58989, -0.24249, -0.00349, -0.76789, 1.77138
- ]
- },
- {
- "ct": 3870, "ccm":
- [
- 1.83834, -0.70528, -0.13307, -0.30499, 1.60523, -0.30024, -0.05701, -0.58313, 1.64014
- ]
- },
- {
- "ct": 4000, "ccm":
- [
- 2.15741, -1.10295, -0.05447, -0.34631, 1.61158, -0.26528, -0.02723, -0.70288, 1.73011
- ]
- },
- {
- "ct": 4400, "ccm":
- [
- 2.05729, -0.95007, -0.10723, -0.41712, 1.78606, -0.36894, -0.11899, -0.55727, 1.67626
- ]
- },
-
- {
- "ct": 4715, "ccm":
- [
- 1.90255, -0.77478, -0.12777, -0.31338, 1.88197, -0.56858, -0.06001, -0.61785, 1.67786
- ]
- },
- {
- "ct": 5920, "ccm":
- [
- 1.98691, -0.84671, -0.14019, -0.26581, 1.70615, -0.44035, -0.09532, -0.47332, 1.56864
- ]
- },
- {
- "ct": 9050, "ccm":
- [
- 2.09255, -0.76541, -0.32714, -0.28973, 2.27462, -0.98489, -0.17299, -0.61275, 1.78574
- ]
- }
- ]
- },
- "rpi.sharpen":
- {
-
- },
- "rpi.focus":
- {
- }
-}
diff --git a/src/ipa/raspberrypi/data/imx477_noir.json b/src/ipa/raspberrypi/data/imx477_noir.json
deleted file mode 100644
index 7d4fc7da..00000000
--- a/src/ipa/raspberrypi/data/imx477_noir.json
+++ /dev/null
@@ -1,362 +0,0 @@
-{
- "rpi.black_level":
- {
- "black_level": 4096
- },
- "rpi.dpc":
- {
-
- },
- "rpi.lux":
- {
- "reference_shutter_speed": 27242,
- "reference_gain": 1.0,
- "reference_aperture": 1.0,
- "reference_lux": 830,
- "reference_Y": 17755
- },
- "rpi.noise":
- {
- "reference_constant": 0,
- "reference_slope": 2.767
- },
- "rpi.geq":
- {
- "offset": 204,
- "slope": 0.01078
- },
- "rpi.sdn":
- {
-
- },
- "rpi.awb":
- {
- "bayes": 0
- },
- "rpi.agc":
- {
- "metering_modes":
- {
- "centre-weighted":
- {
- "weights":
- [
- 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
- ]
- },
- "spot":
- {
- "weights":
- [
- 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
- ]
- },
- "matrix":
- {
- "weights":
- [
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
- ]
- }
- },
- "exposure_modes":
- {
- "normal":
- {
- "shutter":
- [
- 100, 10000, 30000, 60000, 66666
- ],
- "gain":
- [
- 1.0, 2.0, 4.0, 6.0, 8.0
- ]
- },
- "short":
- {
- "shutter":
- [
- 100, 5000, 10000, 20000, 33333
- ],
- "gain":
- [
- 1.0, 2.0, 4.0, 6.0, 8.0
- ]
- },
- "long":
- {
- "shutter":
- [
- 100, 10000, 30000, 60000, 120000
- ],
- "gain":
- [
- 1.0, 2.0, 4.0, 6.0, 12.0
- ]
- }
- },
- "constraint_modes":
- {
- "normal":
- [
- {
- "bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target":
- [
- 0, 0.3, 1000, 0.3
- ]
- }
- ],
- "highlight":
- [
- {
- "bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target":
- [
- 0, 0.3, 1000, 0.3
- ]
- },
- {
- "bound": "UPPER", "q_lo": 0.98, "q_hi": 1.0, "y_target":
- [
- 0, 0.8, 1000, 0.8
- ]
- }
- ],
- "shadows":
- [
- {
- "bound": "LOWER", "q_lo": 0.0, "q_hi": 0.5, "y_target":
- [
- 0, 0.17, 1000, 0.17
- ]
- }
- ]
- },
- "y_target":
- [
- 0, 0.16, 1000, 0.165, 10000, 0.17
- ]
- },
- "rpi.alsc":
- {
- "omega": 1.3,
- "n_iter": 100,
- "luminance_strength": 0.5,
- "calibrations_Cr":
- [
- {
- "ct": 2960, "table":
- [
- 2.088, 2.086, 2.082, 2.081, 2.077, 2.071, 2.068, 2.068, 2.072, 2.073, 2.075, 2.078, 2.084, 2.092, 2.095, 2.098,
- 2.086, 2.084, 2.079, 2.078, 2.075, 2.068, 2.064, 2.063, 2.068, 2.071, 2.072, 2.075, 2.081, 2.089, 2.092, 2.094,
- 2.083, 2.081, 2.077, 2.072, 2.069, 2.062, 2.059, 2.059, 2.063, 2.067, 2.069, 2.072, 2.079, 2.088, 2.089, 2.089,
- 2.081, 2.077, 2.072, 2.068, 2.065, 2.058, 2.055, 2.054, 2.057, 2.062, 2.066, 2.069, 2.077, 2.084, 2.086, 2.086,
- 2.078, 2.075, 2.069, 2.065, 2.061, 2.055, 2.052, 2.049, 2.051, 2.056, 2.062, 2.065, 2.072, 2.079, 2.081, 2.079,
- 2.079, 2.075, 2.069, 2.064, 2.061, 2.053, 2.049, 2.046, 2.049, 2.051, 2.057, 2.062, 2.069, 2.075, 2.077, 2.075,
- 2.082, 2.079, 2.072, 2.065, 2.061, 2.054, 2.049, 2.047, 2.049, 2.051, 2.056, 2.061, 2.066, 2.073, 2.073, 2.069,
- 2.086, 2.082, 2.075, 2.068, 2.062, 2.054, 2.051, 2.049, 2.051, 2.052, 2.056, 2.061, 2.066, 2.073, 2.073, 2.072,
- 2.088, 2.086, 2.079, 2.074, 2.066, 2.057, 2.051, 2.051, 2.054, 2.055, 2.056, 2.061, 2.067, 2.072, 2.073, 2.072,
- 2.091, 2.087, 2.079, 2.075, 2.068, 2.057, 2.052, 2.052, 2.056, 2.055, 2.055, 2.059, 2.066, 2.072, 2.072, 2.072,
- 2.093, 2.088, 2.081, 2.077, 2.069, 2.059, 2.054, 2.054, 2.057, 2.056, 2.056, 2.058, 2.066, 2.072, 2.073, 2.073,
- 2.095, 2.091, 2.084, 2.078, 2.075, 2.067, 2.057, 2.057, 2.059, 2.059, 2.058, 2.059, 2.068, 2.073, 2.075, 2.078
- ]
- },
- {
- "ct": 4850, "table":
- [
- 2.973, 2.968, 2.956, 2.943, 2.941, 2.932, 2.923, 2.921, 2.924, 2.929, 2.931, 2.939, 2.953, 2.965, 2.966, 2.976,
- 2.969, 2.962, 2.951, 2.941, 2.934, 2.928, 2.919, 2.918, 2.919, 2.923, 2.927, 2.933, 2.945, 2.957, 2.962, 2.962,
- 2.964, 2.956, 2.944, 2.932, 2.929, 2.924, 2.915, 2.914, 2.915, 2.919, 2.924, 2.928, 2.941, 2.952, 2.958, 2.959,
- 2.957, 2.951, 2.939, 2.928, 2.924, 2.919, 2.913, 2.911, 2.911, 2.915, 2.919, 2.925, 2.936, 2.947, 2.952, 2.953,
- 2.954, 2.947, 2.935, 2.924, 2.919, 2.915, 2.908, 2.906, 2.906, 2.907, 2.914, 2.921, 2.932, 2.941, 2.943, 2.942,
- 2.953, 2.946, 2.932, 2.921, 2.916, 2.911, 2.904, 2.902, 2.901, 2.904, 2.909, 2.919, 2.926, 2.937, 2.939, 2.939,
- 2.953, 2.947, 2.932, 2.918, 2.915, 2.909, 2.903, 2.901, 2.901, 2.906, 2.911, 2.918, 2.924, 2.936, 2.936, 2.932,
- 2.956, 2.948, 2.934, 2.919, 2.916, 2.908, 2.903, 2.901, 2.902, 2.907, 2.909, 2.917, 2.926, 2.936, 2.939, 2.939,
- 2.957, 2.951, 2.936, 2.923, 2.917, 2.907, 2.904, 2.901, 2.902, 2.908, 2.911, 2.919, 2.929, 2.939, 2.942, 2.942,
- 2.961, 2.951, 2.936, 2.922, 2.918, 2.906, 2.904, 2.901, 2.901, 2.907, 2.911, 2.921, 2.931, 2.941, 2.942, 2.944,
- 2.964, 2.954, 2.936, 2.924, 2.918, 2.909, 2.905, 2.905, 2.905, 2.907, 2.912, 2.923, 2.933, 2.942, 2.944, 2.944,
- 2.964, 2.958, 2.943, 2.927, 2.921, 2.914, 2.909, 2.907, 2.907, 2.912, 2.916, 2.928, 2.936, 2.944, 2.947, 2.952
- ]
- },
- {
- "ct": 5930, "table":
- [
- 3.312, 3.308, 3.301, 3.294, 3.288, 3.277, 3.268, 3.261, 3.259, 3.261, 3.267, 3.273, 3.285, 3.301, 3.303, 3.312,
- 3.308, 3.304, 3.294, 3.291, 3.283, 3.271, 3.263, 3.259, 3.257, 3.258, 3.261, 3.268, 3.278, 3.293, 3.299, 3.299,
- 3.302, 3.296, 3.288, 3.282, 3.276, 3.267, 3.259, 3.254, 3.252, 3.253, 3.256, 3.261, 3.273, 3.289, 3.292, 3.292,
- 3.296, 3.289, 3.282, 3.276, 3.269, 3.263, 3.256, 3.251, 3.248, 3.249, 3.251, 3.257, 3.268, 3.279, 3.284, 3.284,
- 3.292, 3.285, 3.279, 3.271, 3.264, 3.257, 3.249, 3.243, 3.241, 3.241, 3.246, 3.252, 3.261, 3.274, 3.275, 3.273,
- 3.291, 3.285, 3.276, 3.268, 3.259, 3.251, 3.242, 3.239, 3.236, 3.238, 3.244, 3.248, 3.258, 3.268, 3.269, 3.265,
- 3.294, 3.288, 3.275, 3.266, 3.257, 3.248, 3.239, 3.238, 3.237, 3.238, 3.243, 3.246, 3.255, 3.264, 3.264, 3.257,
- 3.297, 3.293, 3.279, 3.268, 3.258, 3.249, 3.238, 3.237, 3.239, 3.239, 3.243, 3.245, 3.255, 3.264, 3.264, 3.263,
- 3.301, 3.295, 3.281, 3.271, 3.259, 3.248, 3.237, 3.237, 3.239, 3.241, 3.243, 3.246, 3.257, 3.265, 3.266, 3.264,
- 3.306, 3.295, 3.279, 3.271, 3.261, 3.247, 3.235, 3.234, 3.239, 3.239, 3.243, 3.247, 3.258, 3.265, 3.265, 3.264,
- 3.308, 3.297, 3.279, 3.272, 3.261, 3.249, 3.239, 3.239, 3.241, 3.243, 3.245, 3.248, 3.261, 3.265, 3.266, 3.265,
- 3.309, 3.301, 3.286, 3.276, 3.267, 3.256, 3.246, 3.242, 3.244, 3.244, 3.249, 3.253, 3.263, 3.267, 3.271, 3.274
- ]
- }
- ],
- "calibrations_Cb":
- [
- {
- "ct": 2960, "table":
- [
- 2.133, 2.134, 2.139, 2.143, 2.148, 2.155, 2.158, 2.158, 2.158, 2.161, 2.161, 2.162, 2.159, 2.156, 2.152, 2.151,
- 2.132, 2.133, 2.135, 2.142, 2.147, 2.153, 2.158, 2.158, 2.158, 2.158, 2.159, 2.159, 2.157, 2.154, 2.151, 2.148,
- 2.133, 2.133, 2.135, 2.142, 2.149, 2.154, 2.158, 2.158, 2.157, 2.156, 2.158, 2.157, 2.155, 2.153, 2.148, 2.146,
- 2.133, 2.133, 2.138, 2.145, 2.149, 2.154, 2.158, 2.159, 2.158, 2.155, 2.157, 2.156, 2.153, 2.149, 2.146, 2.144,
- 2.133, 2.134, 2.139, 2.146, 2.149, 2.154, 2.158, 2.159, 2.159, 2.156, 2.154, 2.154, 2.149, 2.145, 2.143, 2.139,
- 2.135, 2.135, 2.139, 2.146, 2.151, 2.155, 2.158, 2.159, 2.158, 2.156, 2.153, 2.151, 2.146, 2.143, 2.139, 2.136,
- 2.135, 2.135, 2.138, 2.145, 2.151, 2.154, 2.157, 2.158, 2.157, 2.156, 2.153, 2.151, 2.147, 2.143, 2.141, 2.137,
- 2.135, 2.134, 2.135, 2.141, 2.149, 2.154, 2.157, 2.157, 2.157, 2.157, 2.157, 2.153, 2.149, 2.146, 2.142, 2.139,
- 2.132, 2.133, 2.135, 2.139, 2.148, 2.153, 2.158, 2.159, 2.159, 2.161, 2.161, 2.157, 2.154, 2.149, 2.144, 2.141,
- 2.132, 2.133, 2.135, 2.141, 2.149, 2.155, 2.161, 2.161, 2.162, 2.162, 2.163, 2.159, 2.154, 2.149, 2.144, 2.138,
- 2.136, 2.136, 2.137, 2.143, 2.149, 2.156, 2.162, 2.163, 2.162, 2.163, 2.164, 2.161, 2.157, 2.152, 2.146, 2.138,
- 2.137, 2.137, 2.141, 2.147, 2.152, 2.157, 2.162, 2.162, 2.159, 2.161, 2.162, 2.162, 2.157, 2.152, 2.148, 2.148
- ]
- },
- {
- "ct": 4850, "table":
- [
- 1.463, 1.464, 1.471, 1.478, 1.479, 1.483, 1.484, 1.486, 1.486, 1.484, 1.483, 1.481, 1.478, 1.475, 1.471, 1.468,
- 1.463, 1.463, 1.468, 1.476, 1.479, 1.482, 1.484, 1.487, 1.486, 1.484, 1.483, 1.482, 1.478, 1.473, 1.469, 1.468,
- 1.463, 1.464, 1.468, 1.476, 1.479, 1.483, 1.484, 1.486, 1.486, 1.485, 1.484, 1.482, 1.477, 1.473, 1.469, 1.468,
- 1.463, 1.464, 1.469, 1.477, 1.481, 1.483, 1.485, 1.487, 1.487, 1.485, 1.485, 1.482, 1.478, 1.474, 1.469, 1.468,
- 1.465, 1.465, 1.471, 1.478, 1.481, 1.484, 1.486, 1.488, 1.488, 1.487, 1.485, 1.482, 1.477, 1.472, 1.468, 1.467,
- 1.465, 1.466, 1.472, 1.479, 1.482, 1.485, 1.486, 1.488, 1.488, 1.486, 1.484, 1.479, 1.475, 1.472, 1.468, 1.466,
- 1.466, 1.466, 1.472, 1.478, 1.482, 1.484, 1.485, 1.488, 1.487, 1.485, 1.483, 1.479, 1.475, 1.472, 1.469, 1.468,
- 1.465, 1.466, 1.469, 1.476, 1.481, 1.485, 1.485, 1.486, 1.486, 1.485, 1.483, 1.479, 1.477, 1.474, 1.471, 1.469,
- 1.464, 1.465, 1.469, 1.476, 1.481, 1.484, 1.485, 1.487, 1.487, 1.486, 1.485, 1.481, 1.478, 1.475, 1.471, 1.469,
- 1.463, 1.464, 1.469, 1.477, 1.481, 1.485, 1.485, 1.488, 1.488, 1.487, 1.486, 1.481, 1.478, 1.475, 1.471, 1.468,
- 1.464, 1.465, 1.471, 1.478, 1.482, 1.486, 1.486, 1.488, 1.488, 1.487, 1.486, 1.481, 1.478, 1.475, 1.472, 1.468,
- 1.465, 1.466, 1.472, 1.481, 1.483, 1.487, 1.487, 1.488, 1.488, 1.486, 1.485, 1.481, 1.479, 1.476, 1.473, 1.472
- ]
- },
- {
- "ct": 5930, "table":
- [
- 1.443, 1.444, 1.448, 1.453, 1.459, 1.463, 1.465, 1.467, 1.469, 1.469, 1.467, 1.466, 1.462, 1.457, 1.454, 1.451,
- 1.443, 1.444, 1.445, 1.451, 1.459, 1.463, 1.465, 1.467, 1.469, 1.469, 1.467, 1.465, 1.461, 1.456, 1.452, 1.451,
- 1.444, 1.444, 1.445, 1.451, 1.459, 1.463, 1.466, 1.468, 1.469, 1.469, 1.467, 1.465, 1.461, 1.456, 1.452, 1.449,
- 1.444, 1.444, 1.447, 1.452, 1.459, 1.464, 1.467, 1.469, 1.471, 1.469, 1.467, 1.466, 1.461, 1.456, 1.452, 1.449,
- 1.444, 1.445, 1.448, 1.452, 1.459, 1.465, 1.469, 1.471, 1.471, 1.471, 1.468, 1.465, 1.461, 1.455, 1.451, 1.449,
- 1.445, 1.446, 1.449, 1.453, 1.461, 1.466, 1.469, 1.471, 1.472, 1.469, 1.467, 1.465, 1.459, 1.455, 1.451, 1.447,
- 1.446, 1.446, 1.449, 1.453, 1.461, 1.466, 1.469, 1.469, 1.469, 1.469, 1.467, 1.465, 1.459, 1.455, 1.452, 1.449,
- 1.446, 1.446, 1.447, 1.451, 1.459, 1.466, 1.469, 1.469, 1.469, 1.469, 1.467, 1.465, 1.461, 1.457, 1.454, 1.451,
- 1.444, 1.444, 1.447, 1.451, 1.459, 1.466, 1.469, 1.469, 1.471, 1.471, 1.468, 1.466, 1.462, 1.458, 1.454, 1.452,
- 1.444, 1.444, 1.448, 1.453, 1.459, 1.466, 1.469, 1.471, 1.472, 1.472, 1.468, 1.466, 1.462, 1.458, 1.454, 1.449,
- 1.446, 1.447, 1.449, 1.454, 1.461, 1.466, 1.471, 1.471, 1.471, 1.471, 1.468, 1.466, 1.462, 1.459, 1.455, 1.449,
- 1.447, 1.447, 1.452, 1.457, 1.462, 1.468, 1.472, 1.472, 1.471, 1.471, 1.468, 1.466, 1.462, 1.459, 1.456, 1.455
- ]
- }
- ],
- "luminance_lut":
- [
- 1.548, 1.499, 1.387, 1.289, 1.223, 1.183, 1.164, 1.154, 1.153, 1.169, 1.211, 1.265, 1.345, 1.448, 1.581, 1.619,
- 1.513, 1.412, 1.307, 1.228, 1.169, 1.129, 1.105, 1.098, 1.103, 1.127, 1.157, 1.209, 1.272, 1.361, 1.481, 1.583,
- 1.449, 1.365, 1.257, 1.175, 1.124, 1.085, 1.062, 1.054, 1.059, 1.079, 1.113, 1.151, 1.211, 1.293, 1.407, 1.488,
- 1.424, 1.324, 1.222, 1.139, 1.089, 1.056, 1.034, 1.031, 1.034, 1.049, 1.075, 1.115, 1.164, 1.241, 1.351, 1.446,
- 1.412, 1.297, 1.203, 1.119, 1.069, 1.039, 1.021, 1.016, 1.022, 1.032, 1.052, 1.086, 1.135, 1.212, 1.321, 1.439,
- 1.406, 1.287, 1.195, 1.115, 1.059, 1.028, 1.014, 1.012, 1.015, 1.026, 1.041, 1.074, 1.125, 1.201, 1.302, 1.425,
- 1.406, 1.294, 1.205, 1.126, 1.062, 1.031, 1.013, 1.009, 1.011, 1.019, 1.042, 1.079, 1.129, 1.203, 1.302, 1.435,
- 1.415, 1.318, 1.229, 1.146, 1.076, 1.039, 1.019, 1.014, 1.017, 1.031, 1.053, 1.093, 1.144, 1.219, 1.314, 1.436,
- 1.435, 1.348, 1.246, 1.164, 1.094, 1.059, 1.036, 1.032, 1.037, 1.049, 1.072, 1.114, 1.167, 1.257, 1.343, 1.462,
- 1.471, 1.385, 1.278, 1.189, 1.124, 1.084, 1.064, 1.061, 1.069, 1.078, 1.101, 1.146, 1.207, 1.298, 1.415, 1.496,
- 1.522, 1.436, 1.323, 1.228, 1.169, 1.118, 1.101, 1.094, 1.099, 1.113, 1.146, 1.194, 1.265, 1.353, 1.474, 1.571,
- 1.578, 1.506, 1.378, 1.281, 1.211, 1.156, 1.135, 1.134, 1.139, 1.158, 1.194, 1.251, 1.327, 1.427, 1.559, 1.611
- ],
- "sigma": 0.00121,
- "sigma_Cb": 0.00115
- },
- "rpi.contrast":
- {
- "ce_enable": 1,
- "gamma_curve":
- [
- 0, 0, 1024, 5040, 2048, 9338, 3072, 12356, 4096, 15312, 5120, 18051, 6144, 20790, 7168, 23193,
- 8192, 25744, 9216, 27942, 10240, 30035, 11264, 32005, 12288, 33975, 13312, 35815, 14336, 37600, 15360, 39168,
- 16384, 40642, 18432, 43379, 20480, 45749, 22528, 47753, 24576, 49621, 26624, 51253, 28672, 52698, 30720, 53796,
- 32768, 54876, 36864, 57012, 40960, 58656, 45056, 59954, 49152, 61183, 53248, 62355, 57344, 63419, 61440, 64476,
- 65535, 65535
- ]
- },
- "rpi.ccm":
- {
- "ccms":
- [
- {
- "ct": 2360, "ccm":
- [
- 1.66078, -0.23588, -0.42491, -0.47456, 1.82763, -0.35307, -0.00545, -1.44729, 2.45273
- ]
- },
- {
- "ct": 2870, "ccm":
- [
- 1.78373, -0.55344, -0.23029, -0.39951, 1.69701, -0.29751, 0.01986, -1.06525, 2.04539
- ]
- },
- {
- "ct": 2970, "ccm":
- [
- 1.73511, -0.56973, -0.16537, -0.36338, 1.69878, -0.33539, -0.02354, -0.76813, 1.79168
- ]
- },
- {
- "ct": 3000, "ccm":
- [
- 2.06374, -0.92218, -0.14156, -0.41721, 1.69289, -0.27568, -0.00554, -0.92741, 1.93295
- ]
- },
- {
- "ct": 3700, "ccm":
- [
- 2.13792, -1.08136, -0.05655, -0.34739, 1.58989, -0.24249, -0.00349, -0.76789, 1.77138
- ]
- },
- {
- "ct": 3870, "ccm":
- [
- 1.83834, -0.70528, -0.13307, -0.30499, 1.60523, -0.30024, -0.05701, -0.58313, 1.64014
- ]
- },
- {
- "ct": 4000, "ccm":
- [
- 2.15741, -1.10295, -0.05447, -0.34631, 1.61158, -0.26528, -0.02723, -0.70288, 1.73011
- ]
- },
- {
- "ct": 4400, "ccm":
- [
- 2.05729, -0.95007, -0.10723, -0.41712, 1.78606, -0.36894, -0.11899, -0.55727, 1.67626
- ]
- },
-
- {
- "ct": 4715, "ccm":
- [
- 1.90255, -0.77478, -0.12777, -0.31338, 1.88197, -0.56858, -0.06001, -0.61785, 1.67786
- ]
- },
- {
- "ct": 5920, "ccm":
- [
- 1.98691, -0.84671, -0.14019, -0.26581, 1.70615, -0.44035, -0.09532, -0.47332, 1.56864
- ]
- },
- {
- "ct": 9050, "ccm":
- [
- 2.09255, -0.76541, -0.32714, -0.28973, 2.27462, -0.98489, -0.17299, -0.61275, 1.78574
- ]
- }
- ]
- },
- "rpi.sharpen":
- {
-
- },
- "rpi.focus":
- {
- }
-}
diff --git a/src/ipa/raspberrypi/data/imx519.json b/src/ipa/raspberrypi/data/imx519.json
deleted file mode 100644
index 2ce6a08c..00000000
--- a/src/ipa/raspberrypi/data/imx519.json
+++ /dev/null
@@ -1,338 +0,0 @@
-{
- "rpi.black_level":
- {
- "black_level": 4096
- },
- "rpi.dpc":
- {
- },
- "rpi.lux":
- {
- "reference_shutter_speed": 13841,
- "reference_gain": 2.0,
- "reference_aperture": 1.0,
- "reference_lux": 900,
- "reference_Y": 12064
- },
- "rpi.noise":
- {
- "reference_constant": 0,
- "reference_slope": 2.776
- },
- "rpi.geq":
- {
- "offset": 189,
- "slope": 0.01495
- },
- "rpi.sdn":
- {
- },
- "rpi.awb":
- {
- "priors":
- [
- {
- "lux": 0, "prior":
- [
- 2000, 1.0, 3000, 0.0, 13000, 0.0
- ]
- },
- {
- "lux": 800, "prior":
- [
- 2000, 0.0, 6000, 2.0, 13000, 2.0
- ]
- },
- {
- "lux": 1500, "prior":
- [
- 2000, 0.0, 4000, 1.0, 6000, 6.0, 6500, 7.0, 7000, 1.0, 13000, 1.0
- ]
- }
- ],
- "modes":
- {
- "auto":
- {
- "lo": 2500,
- "hi": 7900
- },
- "incandescent":
- {
- "lo": 2500,
- "hi": 3000
- },
- "tungsten":
- {
- "lo": 3000,
- "hi": 3500
- },
- "fluorescent":
- {
- "lo": 4000,
- "hi": 4700
- },
- "indoor":
- {
- "lo": 3000,
- "hi": 5000
- },
- "daylight":
- {
- "lo": 5500,
- "hi": 6500
- },
- "cloudy":
- {
- "lo": 7000,
- "hi": 8000
- }
- },
- "bayes": 1,
- "ct_curve":
- [
- 2890.0, 0.7328, 0.3734, 3550.0, 0.6228, 0.4763, 4500.0, 0.5208, 0.5825, 5700.0, 0.4467, 0.6671, 7900.0, 0.3858, 0.7411
- ],
- "sensitivity_r": 1.0,
- "sensitivity_b": 1.0,
- "transverse_pos": 0.02027,
- "transverse_neg": 0.01935
- },
- "rpi.agc":
- {
- "metering_modes":
- {
- "centre-weighted":
- {
- "weights":
- [
- 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
- ]
- },
- "spot":
- {
- "weights":
- [
- 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
- ]
- },
- "matrix":
- {
- "weights":
- [
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
- ]
- }
- },
- "exposure_modes":
- {
- "normal":
- {
- "shutter":
- [
- 100, 10000, 30000, 60000, 120000
- ],
- "gain":
- [
- 1.0, 2.0, 4.0, 6.0, 8.0
- ]
- },
- "short":
- {
- "shutter":
- [
- 100, 5000, 10000, 20000, 120000
- ],
- "gain":
- [
- 1.0, 2.0, 4.0, 6.0, 8.0
- ]
- }
- },
- "constraint_modes":
- {
- "normal":
- [
- {
- "bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target":
- [
- 0, 0.5, 1000, 0.5
- ]
- }
- ],
- "highlight":
- [
- {
- "bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target":
- [
- 0, 0.5, 1000, 0.5
- ]
- },
- {
- "bound": "UPPER", "q_lo": 0.98, "q_hi": 1.0, "y_target":
- [
- 0, 0.8, 1000, 0.8
- ]
- }
- ]
- },
- "y_target":
- [
- 0, 0.16, 1000, 0.165, 10000, 0.17
- ]
- },
- "rpi.alsc":
- {
- "omega": 1.3,
- "n_iter": 100,
- "luminance_strength": 0.5,
- "calibrations_Cr":
- [
- {
- "ct": 3000, "table":
- [
- 1.527, 1.521, 1.508, 1.493, 1.476, 1.455, 1.442, 1.441, 1.441, 1.441, 1.448, 1.467, 1.483, 1.494, 1.503, 1.504,
- 1.525, 1.513, 1.496, 1.477, 1.461, 1.434, 1.418, 1.409, 1.409, 1.416, 1.429, 1.449, 1.469, 1.485, 1.495, 1.503,
- 1.517, 1.506, 1.485, 1.461, 1.434, 1.412, 1.388, 1.376, 1.376, 1.386, 1.405, 1.429, 1.449, 1.471, 1.488, 1.495,
- 1.512, 1.496, 1.471, 1.442, 1.412, 1.388, 1.361, 1.344, 1.344, 1.358, 1.384, 1.405, 1.431, 1.456, 1.479, 1.489,
- 1.508, 1.488, 1.458, 1.425, 1.393, 1.361, 1.343, 1.322, 1.321, 1.342, 1.358, 1.385, 1.416, 1.445, 1.471, 1.484,
- 1.507, 1.482, 1.453, 1.418, 1.382, 1.349, 1.322, 1.318, 1.318, 1.321, 1.345, 1.373, 1.405, 1.437, 1.465, 1.483,
- 1.507, 1.482, 1.453, 1.418, 1.382, 1.349, 1.322, 1.313, 1.313, 1.321, 1.345, 1.373, 1.405, 1.437, 1.465, 1.483,
- 1.507, 1.485, 1.455, 1.422, 1.387, 1.355, 1.333, 1.319, 1.321, 1.333, 1.351, 1.381, 1.411, 1.441, 1.467, 1.483,
- 1.508, 1.489, 1.463, 1.432, 1.401, 1.372, 1.355, 1.333, 1.333, 1.351, 1.369, 1.393, 1.422, 1.448, 1.471, 1.484,
- 1.511, 1.494, 1.472, 1.444, 1.416, 1.398, 1.372, 1.361, 1.361, 1.369, 1.393, 1.411, 1.436, 1.458, 1.477, 1.487,
- 1.511, 1.496, 1.478, 1.455, 1.436, 1.416, 1.399, 1.391, 1.391, 1.397, 1.411, 1.429, 1.451, 1.466, 1.479, 1.487,
- 1.511, 1.495, 1.478, 1.462, 1.448, 1.432, 1.419, 1.419, 1.419, 1.419, 1.429, 1.445, 1.459, 1.471, 1.482, 1.487
- ]
- },
- {
- "ct": 6000, "table":
- [
- 2.581, 2.573, 2.558, 2.539, 2.514, 2.487, 2.473, 2.471, 2.471, 2.471, 2.479, 2.499, 2.517, 2.532, 2.543, 2.544,
- 2.575, 2.559, 2.539, 2.521, 2.491, 2.458, 2.435, 2.421, 2.421, 2.429, 2.449, 2.477, 2.499, 2.519, 2.534, 2.543,
- 2.561, 2.549, 2.521, 2.491, 2.457, 2.423, 2.393, 2.375, 2.375, 2.387, 2.412, 2.444, 2.475, 2.499, 2.519, 2.532,
- 2.552, 2.531, 2.498, 2.459, 2.423, 2.391, 2.349, 2.325, 2.325, 2.344, 2.374, 2.412, 2.444, 2.476, 2.505, 2.519,
- 2.543, 2.518, 2.479, 2.435, 2.392, 2.349, 2.324, 2.285, 2.283, 2.313, 2.344, 2.374, 2.417, 2.457, 2.489, 2.506,
- 2.541, 2.511, 2.469, 2.421, 2.372, 2.326, 2.284, 2.277, 2.279, 2.283, 2.313, 2.357, 2.401, 2.443, 2.479, 2.504,
- 2.541, 2.511, 2.469, 2.421, 2.372, 2.326, 2.284, 2.267, 2.267, 2.281, 2.313, 2.357, 2.401, 2.443, 2.479, 2.504,
- 2.541, 2.512, 2.472, 2.425, 2.381, 2.338, 2.302, 2.278, 2.279, 2.301, 2.324, 2.364, 2.407, 2.447, 2.481, 2.504,
- 2.544, 2.519, 2.483, 2.441, 2.401, 2.363, 2.338, 2.302, 2.302, 2.324, 2.355, 2.385, 2.423, 2.459, 2.488, 2.506,
- 2.549, 2.527, 2.497, 2.463, 2.427, 2.401, 2.363, 2.345, 2.345, 2.355, 2.385, 2.412, 2.444, 2.473, 2.497, 2.509,
- 2.552, 2.532, 2.507, 2.481, 2.459, 2.427, 2.402, 2.389, 2.389, 2.394, 2.412, 2.444, 2.465, 2.481, 2.499, 2.511,
- 2.553, 2.533, 2.508, 2.489, 2.475, 2.454, 2.429, 2.429, 2.429, 2.429, 2.439, 2.463, 2.481, 2.492, 2.504, 2.511
- ]
- }
- ],
- "calibrations_Cb":
- [
- {
- "ct": 3000, "table":
- [
- 3.132, 3.126, 3.116, 3.103, 3.097, 3.091, 3.087, 3.086, 3.088, 3.091, 3.092, 3.102, 3.113, 3.121, 3.141, 3.144,
- 3.149, 3.132, 3.123, 3.108, 3.101, 3.096, 3.091, 3.089, 3.091, 3.092, 3.101, 3.107, 3.116, 3.129, 3.144, 3.153,
- 3.161, 3.149, 3.129, 3.121, 3.108, 3.103, 3.101, 3.101, 3.101, 3.103, 3.107, 3.116, 3.125, 3.134, 3.153, 3.159,
- 3.176, 3.161, 3.144, 3.129, 3.124, 3.121, 3.117, 3.118, 3.118, 3.119, 3.122, 3.125, 3.134, 3.146, 3.159, 3.171,
- 3.183, 3.176, 3.157, 3.144, 3.143, 3.143, 3.139, 3.141, 3.141, 3.141, 3.141, 3.141, 3.146, 3.161, 3.171, 3.179,
- 3.189, 3.183, 3.165, 3.157, 3.156, 3.157, 3.159, 3.163, 3.163, 3.163, 3.163, 3.161, 3.163, 3.169, 3.179, 3.187,
- 3.199, 3.189, 3.171, 3.165, 3.164, 3.167, 3.171, 3.173, 3.173, 3.172, 3.171, 3.169, 3.169, 3.175, 3.187, 3.189,
- 3.206, 3.196, 3.177, 3.171, 3.165, 3.167, 3.171, 3.173, 3.173, 3.172, 3.171, 3.171, 3.173, 3.177, 3.192, 3.194,
- 3.209, 3.197, 3.178, 3.171, 3.164, 3.161, 3.159, 3.161, 3.162, 3.164, 3.167, 3.171, 3.173, 3.181, 3.193, 3.198,
- 3.204, 3.194, 3.176, 3.165, 3.161, 3.156, 3.154, 3.154, 3.159, 3.161, 3.164, 3.168, 3.173, 3.182, 3.198, 3.199,
- 3.199, 3.191, 3.176, 3.169, 3.161, 3.157, 3.153, 3.153, 3.156, 3.161, 3.164, 3.168, 3.173, 3.186, 3.196, 3.199,
- 3.199, 3.188, 3.179, 3.173, 3.165, 3.157, 3.153, 3.154, 3.156, 3.159, 3.167, 3.171, 3.176, 3.185, 3.193, 3.198
- ]
- },
- {
- "ct": 6000, "table":
- [
- 1.579, 1.579, 1.577, 1.574, 1.573, 1.571, 1.571, 1.571, 1.571, 1.569, 1.569, 1.571, 1.572, 1.574, 1.577, 1.578,
- 1.584, 1.579, 1.578, 1.575, 1.573, 1.572, 1.571, 1.572, 1.572, 1.571, 1.571, 1.572, 1.573, 1.576, 1.578, 1.579,
- 1.587, 1.584, 1.579, 1.578, 1.575, 1.573, 1.573, 1.575, 1.575, 1.574, 1.573, 1.574, 1.576, 1.578, 1.581, 1.581,
- 1.591, 1.587, 1.584, 1.579, 1.578, 1.579, 1.579, 1.581, 1.581, 1.581, 1.578, 1.577, 1.578, 1.581, 1.585, 1.586,
- 1.595, 1.591, 1.587, 1.585, 1.585, 1.586, 1.587, 1.587, 1.588, 1.588, 1.585, 1.584, 1.584, 1.586, 1.589, 1.589,
- 1.597, 1.595, 1.591, 1.589, 1.591, 1.593, 1.595, 1.596, 1.597, 1.597, 1.595, 1.594, 1.592, 1.592, 1.593, 1.593,
- 1.601, 1.597, 1.593, 1.592, 1.593, 1.595, 1.598, 1.599, 1.602, 1.601, 1.598, 1.596, 1.595, 1.596, 1.595, 1.595,
- 1.601, 1.599, 1.594, 1.593, 1.593, 1.595, 1.598, 1.599, 1.602, 1.601, 1.598, 1.597, 1.597, 1.597, 1.597, 1.597,
- 1.602, 1.599, 1.594, 1.593, 1.592, 1.593, 1.595, 1.597, 1.597, 1.598, 1.598, 1.597, 1.597, 1.597, 1.598, 1.598,
- 1.599, 1.598, 1.594, 1.592, 1.591, 1.591, 1.592, 1.595, 1.596, 1.597, 1.597, 1.597, 1.597, 1.599, 1.599, 1.599,
- 1.598, 1.596, 1.594, 1.593, 1.592, 1.592, 1.592, 1.594, 1.595, 1.597, 1.597, 1.597, 1.598, 1.599, 1.599, 1.599,
- 1.597, 1.595, 1.594, 1.594, 1.593, 1.592, 1.593, 1.595, 1.595, 1.597, 1.598, 1.598, 1.598, 1.599, 1.599, 1.599
- ]
- }
- ],
- "luminance_lut":
- [
- 2.887, 2.754, 2.381, 2.105, 1.859, 1.678, 1.625, 1.623, 1.623, 1.624, 1.669, 1.849, 2.092, 2.362, 2.723, 2.838,
- 2.754, 2.443, 2.111, 1.905, 1.678, 1.542, 1.455, 1.412, 1.412, 1.452, 1.535, 1.665, 1.893, 2.096, 2.413, 2.723,
- 2.443, 2.216, 1.911, 1.678, 1.537, 1.372, 1.288, 1.245, 1.245, 1.283, 1.363, 1.527, 1.665, 1.895, 2.193, 2.413,
- 2.318, 2.057, 1.764, 1.541, 1.372, 1.282, 1.159, 1.113, 1.113, 1.151, 1.269, 1.363, 1.527, 1.749, 2.034, 2.278,
- 2.259, 1.953, 1.671, 1.452, 1.283, 1.159, 1.107, 1.018, 1.017, 1.097, 1.151, 1.269, 1.437, 1.655, 1.931, 2.222,
- 2.257, 1.902, 1.624, 1.408, 1.239, 1.111, 1.019, 1.011, 1.005, 1.014, 1.098, 1.227, 1.395, 1.608, 1.883, 2.222,
- 2.257, 1.902, 1.624, 1.408, 1.239, 1.111, 1.016, 1.001, 1.001, 1.007, 1.098, 1.227, 1.395, 1.608, 1.883, 2.222,
- 2.257, 1.946, 1.666, 1.448, 1.281, 1.153, 1.093, 1.013, 1.008, 1.089, 1.143, 1.269, 1.437, 1.654, 1.934, 2.226,
- 2.309, 2.044, 1.756, 1.532, 1.363, 1.259, 1.153, 1.093, 1.093, 1.143, 1.264, 1.354, 1.524, 1.746, 2.035, 2.284,
- 2.425, 2.201, 1.896, 1.662, 1.519, 1.363, 1.259, 1.214, 1.214, 1.264, 1.354, 1.519, 1.655, 1.888, 2.191, 2.413,
- 2.724, 2.417, 2.091, 1.888, 1.662, 1.519, 1.419, 1.373, 1.373, 1.425, 1.521, 1.655, 1.885, 2.089, 2.409, 2.722,
- 2.858, 2.724, 2.356, 2.085, 1.842, 1.658, 1.581, 1.577, 1.577, 1.579, 1.653, 1.838, 2.084, 2.359, 2.722, 2.842
- ],
- "sigma": 0.00372,
- "sigma_Cb": 0.00244
- },
- "rpi.contrast":
- {
- "ce_enable": 1,
- "gamma_curve":
- [
- 0, 0, 1024, 5040, 2048, 9338, 3072, 12356, 4096, 15312, 5120, 18051, 6144, 20790, 7168, 23193,
- 8192, 25744, 9216, 27942, 10240, 30035, 11264, 32005, 12288, 33975, 13312, 35815, 14336, 37600, 15360, 39168,
- 16384, 40642, 18432, 43379, 20480, 45749, 22528, 47753, 24576, 49621, 26624, 51253, 28672, 52698, 30720, 53796,
- 32768, 54876, 36864, 57012, 40960, 58656, 45056, 59954, 49152, 61183, 53248, 62355, 57344, 63419, 61440, 64476,
- 65535, 65535
- ]
- },
- "rpi.ccm":
- {
- "ccms":
- [
- {
- "ct": 2890, "ccm":
- [
- 1.36754, -0.18448, -0.18306, -0.32356, 1.44826, -0.12471, -0.00412, -0.69936, 1.70348
- ]
- },
- {
- "ct": 2920, "ccm":
- [
- 1.26704, 0.01624, -0.28328, -0.28516, 1.38934, -0.10419, -0.04854, -0.82211, 1.87066
- ]
- },
- {
- "ct": 3550, "ccm":
- [
- 1.42836, -0.27235, -0.15601, -0.28751, 1.41075, -0.12325, -0.01812, -0.54849, 1.56661
- ]
- },
- {
- "ct": 4500, "ccm":
- [
- 1.36328, -0.19569, -0.16759, -0.25254, 1.52248, -0.26994, -0.01575, -0.53155, 1.54729
- ]
- },
- {
- "ct": 5700, "ccm":
- [
- 1.49207, -0.37245, -0.11963, -0.21493, 1.40005, -0.18512, -0.03781, -0.38779, 1.42561
- ]
- },
- {
- "ct": 7900, "ccm":
- [
- 1.34849, -0.05425, -0.29424, -0.22182, 1.77684, -0.55502, -0.07403, -0.55336, 1.62739
- ]
- }
- ]
- },
- "rpi.sharpen":
- {
- }
-}
diff --git a/src/ipa/raspberrypi/data/ov5647.json b/src/ipa/raspberrypi/data/ov5647.json
deleted file mode 100644
index 24bc06fb..00000000
--- a/src/ipa/raspberrypi/data/ov5647.json
+++ /dev/null
@@ -1,409 +0,0 @@
-{
- "rpi.black_level":
- {
- "black_level": 1024
- },
- "rpi.dpc":
- {
-
- },
- "rpi.lux":
- {
- "reference_shutter_speed": 21663,
- "reference_gain": 1.0,
- "reference_aperture": 1.0,
- "reference_lux": 987,
- "reference_Y": 8961
- },
- "rpi.noise":
- {
- "reference_constant": 0,
- "reference_slope": 4.25
- },
- "rpi.geq":
- {
- "offset": 401,
- "slope": 0.05619
- },
- "rpi.sdn":
- {
-
- },
- "rpi.awb":
- {
- "priors":
- [
- {
- "lux": 0, "prior":
- [
- 2000, 1.0, 3000, 0.0, 13000, 0.0
- ]
- },
- {
- "lux": 800, "prior":
- [
- 2000, 0.0, 6000, 2.0, 13000, 2.0
- ]
- },
- {
- "lux": 1500, "prior":
- [
- 2000, 0.0, 4000, 1.0, 6000, 6.0, 6500, 7.0, 7000, 1.0, 13000, 1.0
- ]
- }
- ],
- "modes":
- {
- "auto":
- {
- "lo": 2500,
- "hi": 8000
- },
- "incandescent":
- {
- "lo": 2500,
- "hi": 3000
- },
- "tungsten":
- {
- "lo": 3000,
- "hi": 3500
- },
- "fluorescent":
- {
- "lo": 4000,
- "hi": 4700
- },
- "indoor":
- {
- "lo": 3000,
- "hi": 5000
- },
- "daylight":
- {
- "lo": 5500,
- "hi": 6500
- },
- "cloudy":
- {
- "lo": 7000,
- "hi": 8600
- }
- },
- "bayes": 1,
- "ct_curve":
- [
- 2500.0, 1.0289, 0.4503, 2803.0, 0.9428, 0.5108, 2914.0, 0.9406, 0.5127, 3605.0, 0.8261, 0.6249, 4540.0, 0.7331, 0.7533, 5699.0,
- 0.6715, 0.8627, 8625.0, 0.6081, 1.0012
- ],
- "sensitivity_r": 1.05,
- "sensitivity_b": 1.05,
- "transverse_pos": 0.0321,
- "transverse_neg": 0.04313
- },
- "rpi.agc":
- {
- "metering_modes":
- {
- "centre-weighted":
- {
- "weights":
- [
- 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
- ]
- },
- "spot":
- {
- "weights":
- [
- 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
- ]
- },
- "matrix":
- {
- "weights":
- [
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
- ]
- }
- },
- "exposure_modes":
- {
- "normal":
- {
- "shutter":
- [
- 100, 10000, 30000, 60000, 66666
- ],
- "gain":
- [
- 1.0, 2.0, 4.0, 6.0, 8.0
- ]
- },
- "short":
- {
- "shutter":
- [
- 100, 5000, 10000, 20000, 33333
- ],
- "gain":
- [
- 1.0, 2.0, 4.0, 6.0, 8.0
- ]
- },
- "long":
- {
- "shutter":
- [
- 100, 10000, 30000, 60000, 120000
- ],
- "gain":
- [
- 1.0, 2.0, 4.0, 6.0, 12.0
- ]
- }
- },
- "constraint_modes":
- {
- "normal":
- [
- {
- "bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target":
- [
- 0, 0.5, 1000, 0.5
- ]
- }
- ],
- "highlight":
- [
- {
- "bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target":
- [
- 0, 0.5, 1000, 0.5
- ]
- },
- {
- "bound": "UPPER", "q_lo": 0.98, "q_hi": 1.0, "y_target":
- [
- 0, 0.8, 1000, 0.8
- ]
- }
- ],
- "shadows":
- [
- {
- "bound": "LOWER", "q_lo": 0.0, "q_hi": 0.5, "y_target":
- [
- 0, 0.17, 1000, 0.17
- ]
- }
- ]
- },
- "y_target":
- [
- 0, 0.16, 1000, 0.165, 10000, 0.17
- ],
- "base_ev": 1.25
- },
- "rpi.alsc":
- {
- "omega": 1.3,
- "n_iter": 100,
- "luminance_strength": 0.5,
- "calibrations_Cr":
- [
- {
- "ct": 3000, "table":
- [
- 1.105, 1.103, 1.093, 1.083, 1.071, 1.065, 1.065, 1.065, 1.066, 1.069, 1.072, 1.077, 1.084, 1.089, 1.093, 1.093,
- 1.103, 1.096, 1.084, 1.072, 1.059, 1.051, 1.047, 1.047, 1.051, 1.053, 1.059, 1.067, 1.075, 1.082, 1.085, 1.086,
- 1.096, 1.084, 1.072, 1.059, 1.051, 1.045, 1.039, 1.038, 1.039, 1.045, 1.049, 1.057, 1.063, 1.072, 1.081, 1.082,
- 1.092, 1.075, 1.061, 1.052, 1.045, 1.039, 1.036, 1.035, 1.035, 1.039, 1.044, 1.049, 1.056, 1.063, 1.072, 1.081,
- 1.092, 1.073, 1.058, 1.048, 1.043, 1.038, 1.035, 1.033, 1.033, 1.035, 1.039, 1.044, 1.051, 1.057, 1.069, 1.078,
- 1.091, 1.068, 1.054, 1.045, 1.041, 1.038, 1.035, 1.032, 1.032, 1.032, 1.036, 1.041, 1.045, 1.055, 1.069, 1.078,
- 1.091, 1.068, 1.052, 1.043, 1.041, 1.038, 1.035, 1.032, 1.031, 1.032, 1.034, 1.036, 1.043, 1.055, 1.069, 1.078,
- 1.092, 1.068, 1.052, 1.047, 1.042, 1.041, 1.038, 1.035, 1.032, 1.032, 1.035, 1.039, 1.043, 1.055, 1.071, 1.079,
- 1.092, 1.073, 1.057, 1.051, 1.047, 1.047, 1.044, 1.041, 1.038, 1.038, 1.039, 1.043, 1.051, 1.059, 1.076, 1.083,
- 1.092, 1.081, 1.068, 1.058, 1.056, 1.056, 1.053, 1.052, 1.049, 1.048, 1.048, 1.051, 1.059, 1.066, 1.083, 1.085,
- 1.091, 1.087, 1.081, 1.068, 1.065, 1.064, 1.062, 1.062, 1.061, 1.056, 1.056, 1.056, 1.064, 1.069, 1.084, 1.089,
- 1.091, 1.089, 1.085, 1.079, 1.069, 1.068, 1.067, 1.067, 1.067, 1.063, 1.061, 1.063, 1.068, 1.069, 1.081, 1.092
- ]
- },
- {
- "ct": 5000, "table":
- [
- 1.486, 1.484, 1.468, 1.449, 1.427, 1.403, 1.399, 1.399, 1.399, 1.404, 1.413, 1.433, 1.454, 1.473, 1.482, 1.488,
- 1.484, 1.472, 1.454, 1.431, 1.405, 1.381, 1.365, 1.365, 1.367, 1.373, 1.392, 1.411, 1.438, 1.458, 1.476, 1.481,
- 1.476, 1.458, 1.433, 1.405, 1.381, 1.361, 1.339, 1.334, 1.334, 1.346, 1.362, 1.391, 1.411, 1.438, 1.462, 1.474,
- 1.471, 1.443, 1.417, 1.388, 1.361, 1.339, 1.321, 1.313, 1.313, 1.327, 1.346, 1.362, 1.391, 1.422, 1.453, 1.473,
- 1.469, 1.439, 1.408, 1.377, 1.349, 1.321, 1.312, 1.299, 1.299, 1.311, 1.327, 1.348, 1.378, 1.415, 1.446, 1.468,
- 1.468, 1.434, 1.402, 1.371, 1.341, 1.316, 1.299, 1.296, 1.295, 1.299, 1.314, 1.338, 1.371, 1.408, 1.441, 1.466,
- 1.468, 1.434, 1.401, 1.371, 1.341, 1.316, 1.301, 1.296, 1.295, 1.297, 1.314, 1.338, 1.369, 1.408, 1.441, 1.465,
- 1.469, 1.436, 1.401, 1.374, 1.348, 1.332, 1.315, 1.301, 1.301, 1.313, 1.324, 1.342, 1.372, 1.409, 1.442, 1.465,
- 1.471, 1.444, 1.413, 1.388, 1.371, 1.348, 1.332, 1.323, 1.323, 1.324, 1.342, 1.362, 1.386, 1.418, 1.449, 1.467,
- 1.473, 1.454, 1.431, 1.407, 1.388, 1.371, 1.359, 1.352, 1.351, 1.351, 1.362, 1.383, 1.404, 1.433, 1.462, 1.472,
- 1.474, 1.461, 1.447, 1.424, 1.407, 1.394, 1.385, 1.381, 1.379, 1.381, 1.383, 1.401, 1.419, 1.444, 1.466, 1.481,
- 1.474, 1.464, 1.455, 1.442, 1.421, 1.408, 1.403, 1.403, 1.403, 1.399, 1.402, 1.415, 1.432, 1.446, 1.467, 1.483
- ]
- },
- {
- "ct": 6500, "table":
- [
- 1.567, 1.565, 1.555, 1.541, 1.525, 1.518, 1.518, 1.518, 1.521, 1.527, 1.532, 1.541, 1.551, 1.559, 1.567, 1.569,
- 1.565, 1.557, 1.542, 1.527, 1.519, 1.515, 1.511, 1.516, 1.519, 1.524, 1.528, 1.533, 1.542, 1.553, 1.559, 1.562,
- 1.561, 1.546, 1.532, 1.521, 1.518, 1.515, 1.511, 1.516, 1.519, 1.524, 1.528, 1.529, 1.533, 1.542, 1.554, 1.559,
- 1.561, 1.539, 1.526, 1.524, 1.521, 1.521, 1.522, 1.524, 1.525, 1.531, 1.529, 1.529, 1.531, 1.538, 1.549, 1.558,
- 1.559, 1.538, 1.526, 1.525, 1.524, 1.528, 1.534, 1.536, 1.536, 1.536, 1.532, 1.529, 1.531, 1.537, 1.548, 1.556,
- 1.561, 1.537, 1.525, 1.524, 1.526, 1.532, 1.537, 1.539, 1.538, 1.537, 1.532, 1.529, 1.529, 1.537, 1.546, 1.556,
- 1.561, 1.536, 1.524, 1.522, 1.525, 1.532, 1.538, 1.538, 1.537, 1.533, 1.528, 1.526, 1.527, 1.536, 1.546, 1.555,
- 1.561, 1.537, 1.522, 1.521, 1.524, 1.531, 1.536, 1.537, 1.534, 1.529, 1.526, 1.522, 1.523, 1.534, 1.547, 1.555,
- 1.561, 1.538, 1.524, 1.522, 1.526, 1.531, 1.535, 1.535, 1.534, 1.527, 1.524, 1.522, 1.522, 1.535, 1.549, 1.556,
- 1.558, 1.543, 1.532, 1.526, 1.526, 1.529, 1.534, 1.535, 1.533, 1.526, 1.523, 1.522, 1.524, 1.537, 1.552, 1.557,
- 1.555, 1.546, 1.541, 1.528, 1.527, 1.528, 1.531, 1.533, 1.531, 1.527, 1.522, 1.522, 1.526, 1.536, 1.552, 1.561,
- 1.555, 1.547, 1.542, 1.538, 1.526, 1.526, 1.529, 1.531, 1.529, 1.528, 1.519, 1.519, 1.527, 1.531, 1.543, 1.561
- ]
- }
- ],
- "calibrations_Cb":
- [
- {
- "ct": 3000, "table":
- [
- 1.684, 1.688, 1.691, 1.697, 1.709, 1.722, 1.735, 1.745, 1.747, 1.745, 1.731, 1.719, 1.709, 1.705, 1.699, 1.699,
- 1.684, 1.689, 1.694, 1.708, 1.721, 1.735, 1.747, 1.762, 1.762, 1.758, 1.745, 1.727, 1.716, 1.707, 1.701, 1.699,
- 1.684, 1.691, 1.704, 1.719, 1.734, 1.755, 1.772, 1.786, 1.789, 1.788, 1.762, 1.745, 1.724, 1.709, 1.702, 1.698,
- 1.682, 1.694, 1.709, 1.729, 1.755, 1.773, 1.798, 1.815, 1.817, 1.808, 1.788, 1.762, 1.733, 1.714, 1.704, 1.699,
- 1.682, 1.693, 1.713, 1.742, 1.772, 1.798, 1.815, 1.829, 1.831, 1.821, 1.807, 1.773, 1.742, 1.716, 1.703, 1.699,
- 1.681, 1.693, 1.713, 1.742, 1.772, 1.799, 1.828, 1.839, 1.839, 1.828, 1.807, 1.774, 1.742, 1.715, 1.699, 1.695,
- 1.679, 1.691, 1.712, 1.739, 1.771, 1.798, 1.825, 1.829, 1.831, 1.818, 1.801, 1.774, 1.738, 1.712, 1.695, 1.691,
- 1.676, 1.685, 1.703, 1.727, 1.761, 1.784, 1.801, 1.817, 1.817, 1.801, 1.779, 1.761, 1.729, 1.706, 1.691, 1.684,
- 1.669, 1.678, 1.692, 1.714, 1.741, 1.764, 1.784, 1.795, 1.795, 1.779, 1.761, 1.738, 1.713, 1.696, 1.683, 1.679,
- 1.664, 1.671, 1.679, 1.693, 1.716, 1.741, 1.762, 1.769, 1.769, 1.753, 1.738, 1.713, 1.701, 1.687, 1.681, 1.676,
- 1.661, 1.664, 1.671, 1.679, 1.693, 1.714, 1.732, 1.739, 1.739, 1.729, 1.708, 1.701, 1.685, 1.679, 1.676, 1.677,
- 1.659, 1.661, 1.664, 1.671, 1.679, 1.693, 1.712, 1.714, 1.714, 1.708, 1.701, 1.687, 1.679, 1.672, 1.673, 1.677
- ]
- },
- {
- "ct": 5000, "table":
- [
- 1.177, 1.183, 1.187, 1.191, 1.197, 1.206, 1.213, 1.215, 1.215, 1.215, 1.211, 1.204, 1.196, 1.191, 1.183, 1.182,
- 1.179, 1.185, 1.191, 1.196, 1.206, 1.217, 1.224, 1.229, 1.229, 1.226, 1.221, 1.212, 1.202, 1.195, 1.188, 1.182,
- 1.183, 1.191, 1.196, 1.206, 1.217, 1.229, 1.239, 1.245, 1.245, 1.245, 1.233, 1.221, 1.212, 1.199, 1.193, 1.187,
- 1.183, 1.192, 1.201, 1.212, 1.229, 1.241, 1.252, 1.259, 1.259, 1.257, 1.245, 1.233, 1.217, 1.201, 1.194, 1.192,
- 1.183, 1.192, 1.202, 1.219, 1.238, 1.252, 1.261, 1.269, 1.268, 1.261, 1.257, 1.241, 1.223, 1.204, 1.194, 1.191,
- 1.182, 1.192, 1.202, 1.219, 1.239, 1.255, 1.266, 1.271, 1.271, 1.265, 1.258, 1.242, 1.223, 1.205, 1.192, 1.191,
- 1.181, 1.189, 1.199, 1.218, 1.239, 1.254, 1.262, 1.268, 1.268, 1.258, 1.253, 1.241, 1.221, 1.204, 1.191, 1.187,
- 1.179, 1.184, 1.193, 1.211, 1.232, 1.243, 1.254, 1.257, 1.256, 1.253, 1.242, 1.232, 1.216, 1.199, 1.187, 1.183,
- 1.174, 1.179, 1.187, 1.202, 1.218, 1.232, 1.243, 1.246, 1.246, 1.239, 1.232, 1.218, 1.207, 1.191, 1.183, 1.179,
- 1.169, 1.175, 1.181, 1.189, 1.202, 1.218, 1.229, 1.232, 1.232, 1.224, 1.218, 1.207, 1.199, 1.185, 1.181, 1.174,
- 1.164, 1.168, 1.175, 1.179, 1.189, 1.201, 1.209, 1.213, 1.213, 1.209, 1.201, 1.198, 1.186, 1.181, 1.174, 1.173,
- 1.161, 1.166, 1.171, 1.175, 1.179, 1.189, 1.197, 1.198, 1.198, 1.197, 1.196, 1.186, 1.182, 1.175, 1.173, 1.173
- ]
- },
- {
- "ct": 6500, "table":
- [
- 1.166, 1.171, 1.173, 1.178, 1.187, 1.193, 1.201, 1.205, 1.205, 1.205, 1.199, 1.191, 1.184, 1.179, 1.174, 1.171,
- 1.166, 1.172, 1.176, 1.184, 1.195, 1.202, 1.209, 1.216, 1.216, 1.213, 1.208, 1.201, 1.189, 1.182, 1.176, 1.171,
- 1.166, 1.173, 1.183, 1.195, 1.202, 1.214, 1.221, 1.228, 1.229, 1.228, 1.221, 1.209, 1.201, 1.186, 1.179, 1.174,
- 1.165, 1.174, 1.187, 1.201, 1.214, 1.223, 1.235, 1.241, 1.242, 1.241, 1.229, 1.221, 1.205, 1.188, 1.181, 1.177,
- 1.165, 1.174, 1.189, 1.207, 1.223, 1.235, 1.242, 1.253, 1.252, 1.245, 1.241, 1.228, 1.211, 1.189, 1.181, 1.178,
- 1.164, 1.173, 1.189, 1.207, 1.224, 1.238, 1.249, 1.255, 1.255, 1.249, 1.242, 1.228, 1.211, 1.191, 1.179, 1.176,
- 1.163, 1.172, 1.187, 1.207, 1.223, 1.237, 1.245, 1.253, 1.252, 1.243, 1.237, 1.228, 1.207, 1.188, 1.176, 1.173,
- 1.159, 1.167, 1.179, 1.199, 1.217, 1.227, 1.237, 1.241, 1.241, 1.237, 1.228, 1.217, 1.201, 1.184, 1.174, 1.169,
- 1.156, 1.164, 1.172, 1.189, 1.205, 1.217, 1.226, 1.229, 1.229, 1.222, 1.217, 1.204, 1.192, 1.177, 1.171, 1.166,
- 1.154, 1.159, 1.166, 1.177, 1.189, 1.205, 1.213, 1.216, 1.216, 1.209, 1.204, 1.192, 1.183, 1.172, 1.168, 1.162,
- 1.152, 1.155, 1.161, 1.166, 1.177, 1.188, 1.195, 1.198, 1.199, 1.196, 1.187, 1.183, 1.173, 1.168, 1.163, 1.162,
- 1.151, 1.154, 1.158, 1.162, 1.168, 1.177, 1.183, 1.184, 1.184, 1.184, 1.182, 1.172, 1.168, 1.165, 1.162, 1.161
- ]
- }
- ],
- "luminance_lut":
- [
- 2.236, 2.111, 1.912, 1.741, 1.579, 1.451, 1.379, 1.349, 1.349, 1.361, 1.411, 1.505, 1.644, 1.816, 2.034, 2.159,
- 2.139, 1.994, 1.796, 1.625, 1.467, 1.361, 1.285, 1.248, 1.239, 1.265, 1.321, 1.408, 1.536, 1.703, 1.903, 2.087,
- 2.047, 1.898, 1.694, 1.511, 1.373, 1.254, 1.186, 1.152, 1.142, 1.166, 1.226, 1.309, 1.441, 1.598, 1.799, 1.978,
- 1.999, 1.824, 1.615, 1.429, 1.281, 1.179, 1.113, 1.077, 1.071, 1.096, 1.153, 1.239, 1.357, 1.525, 1.726, 1.915,
- 1.976, 1.773, 1.563, 1.374, 1.222, 1.119, 1.064, 1.032, 1.031, 1.049, 1.099, 1.188, 1.309, 1.478, 1.681, 1.893,
- 1.973, 1.756, 1.542, 1.351, 1.196, 1.088, 1.028, 1.011, 1.004, 1.029, 1.077, 1.169, 1.295, 1.459, 1.663, 1.891,
- 1.973, 1.761, 1.541, 1.349, 1.193, 1.087, 1.031, 1.006, 1.006, 1.023, 1.075, 1.169, 1.298, 1.463, 1.667, 1.891,
- 1.982, 1.789, 1.568, 1.373, 1.213, 1.111, 1.051, 1.029, 1.024, 1.053, 1.106, 1.199, 1.329, 1.495, 1.692, 1.903,
- 2.015, 1.838, 1.621, 1.426, 1.268, 1.159, 1.101, 1.066, 1.068, 1.099, 1.166, 1.259, 1.387, 1.553, 1.751, 1.937,
- 2.076, 1.911, 1.692, 1.507, 1.346, 1.236, 1.169, 1.136, 1.139, 1.174, 1.242, 1.349, 1.475, 1.641, 1.833, 2.004,
- 2.193, 2.011, 1.798, 1.604, 1.444, 1.339, 1.265, 1.235, 1.237, 1.273, 1.351, 1.461, 1.598, 1.758, 1.956, 2.125,
- 2.263, 2.154, 1.916, 1.711, 1.549, 1.432, 1.372, 1.356, 1.356, 1.383, 1.455, 1.578, 1.726, 1.914, 2.119, 2.211
- ],
- "sigma": 0.006,
- "sigma_Cb": 0.00208
- },
- "rpi.contrast":
- {
- "ce_enable": 1,
- "gamma_curve":
- [
- 0, 0, 1024, 5040, 2048, 9338, 3072, 12356, 4096, 15312, 5120, 18051, 6144, 20790, 7168, 23193,
- 8192, 25744, 9216, 27942, 10240, 30035, 11264, 32005, 12288, 33975, 13312, 35815, 14336, 37600, 15360, 39168,
- 16384, 40642, 18432, 43379, 20480, 45749, 22528, 47753, 24576, 49621, 26624, 51253, 28672, 52698, 30720, 53796,
- 32768, 54876, 36864, 57012, 40960, 58656, 45056, 59954, 49152, 61183, 53248, 62355, 57344, 63419, 61440, 64476,
- 65535, 65535
- ]
- },
- "rpi.ccm":
- {
- "ccms":
- [
- {
- "ct": 2500, "ccm":
- [
- 1.70741, -0.05307, -0.65433, -0.62822, 1.68836, -0.06014, -0.04452, -1.87628, 2.92079
- ]
- },
- {
- "ct": 2803, "ccm":
- [
- 1.74383, -0.18731, -0.55652, -0.56491, 1.67772, -0.11281, -0.01522, -1.60635, 2.62157
- ]
- },
- {
- "ct": 2912, "ccm":
- [
- 1.75215, -0.22221, -0.52995, -0.54568, 1.63522, -0.08954, 0.02633, -1.56997, 2.54364
- ]
- },
- {
- "ct": 2914, "ccm":
- [
- 1.72423, -0.28939, -0.43484, -0.55188, 1.62925, -0.07737, 0.01959, -1.28661, 2.26702
- ]
- },
- {
- "ct": 3605, "ccm":
- [
- 1.80381, -0.43646, -0.36735, -0.46505, 1.56814, -0.10309, 0.00929, -1.00424, 1.99495
- ]
- },
- {
- "ct": 4540, "ccm":
- [
- 1.85263, -0.46545, -0.38719, -0.44136, 1.68443, -0.24307, 0.04108, -0.85599, 1.81491
- ]
- },
- {
- "ct": 5699, "ccm":
- [
- 1.98595, -0.63542, -0.35054, -0.34623, 1.54146, -0.19522, 0.00411, -0.70936, 1.70525
- ]
- },
- {
- "ct": 8625, "ccm":
- [
- 2.21637, -0.56663, -0.64974, -0.41133, 1.96625, -0.55492, -0.02307, -0.83529, 1.85837
- ]
- }
- ]
- },
- "rpi.sharpen":
- {
-
- }
-}
diff --git a/src/ipa/raspberrypi/data/ov5647_noir.json b/src/ipa/raspberrypi/data/ov5647_noir.json
deleted file mode 100644
index 1c628ed1..00000000
--- a/src/ipa/raspberrypi/data/ov5647_noir.json
+++ /dev/null
@@ -1,341 +0,0 @@
-{
- "rpi.black_level":
- {
- "black_level": 1024
- },
- "rpi.dpc":
- {
-
- },
- "rpi.lux":
- {
- "reference_shutter_speed": 21663,
- "reference_gain": 1.0,
- "reference_aperture": 1.0,
- "reference_lux": 987,
- "reference_Y": 8961
- },
- "rpi.noise":
- {
- "reference_constant": 0,
- "reference_slope": 4.25
- },
- "rpi.geq":
- {
- "offset": 401,
- "slope": 0.05619
- },
- "rpi.sdn":
- {
-
- },
- "rpi.awb":
- {
- "bayes": 0
- },
- "rpi.agc":
- {
- "metering_modes":
- {
- "centre-weighted":
- {
- "weights":
- [
- 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
- ]
- },
- "spot":
- {
- "weights":
- [
- 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
- ]
- },
- "matrix":
- {
- "weights":
- [
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
- ]
- }
- },
- "exposure_modes":
- {
- "normal":
- {
- "shutter":
- [
- 100, 10000, 30000, 60000, 66666
- ],
- "gain":
- [
- 1.0, 2.0, 4.0, 6.0, 8.0
- ]
- },
- "short":
- {
- "shutter":
- [
- 100, 5000, 10000, 20000, 33333
- ],
- "gain":
- [
- 1.0, 2.0, 4.0, 6.0, 8.0
- ]
- },
- "long":
- {
- "shutter":
- [
- 100, 10000, 30000, 60000, 120000
- ],
- "gain":
- [
- 1.0, 2.0, 4.0, 6.0, 12.0
- ]
- }
- },
- "constraint_modes":
- {
- "normal":
- [
- {
- "bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target":
- [
- 0, 0.5, 1000, 0.5
- ]
- }
- ],
- "highlight":
- [
- {
- "bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target":
- [
- 0, 0.5, 1000, 0.5
- ]
- },
- {
- "bound": "UPPER", "q_lo": 0.98, "q_hi": 1.0, "y_target":
- [
- 0, 0.8, 1000, 0.8
- ]
- }
- ],
- "shadows":
- [
- {
- "bound": "LOWER", "q_lo": 0.0, "q_hi": 0.5, "y_target":
- [
- 0, 0.17, 1000, 0.17
- ]
- }
- ]
- },
- "y_target":
- [
- 0, 0.16, 1000, 0.165, 10000, 0.17
- ],
- "base_ev": 1.25
- },
- "rpi.alsc":
- {
- "omega": 1.3,
- "n_iter": 100,
- "luminance_strength": 0.5,
- "calibrations_Cr":
- [
- {
- "ct": 3000, "table":
- [
- 1.105, 1.103, 1.093, 1.083, 1.071, 1.065, 1.065, 1.065, 1.066, 1.069, 1.072, 1.077, 1.084, 1.089, 1.093, 1.093,
- 1.103, 1.096, 1.084, 1.072, 1.059, 1.051, 1.047, 1.047, 1.051, 1.053, 1.059, 1.067, 1.075, 1.082, 1.085, 1.086,
- 1.096, 1.084, 1.072, 1.059, 1.051, 1.045, 1.039, 1.038, 1.039, 1.045, 1.049, 1.057, 1.063, 1.072, 1.081, 1.082,
- 1.092, 1.075, 1.061, 1.052, 1.045, 1.039, 1.036, 1.035, 1.035, 1.039, 1.044, 1.049, 1.056, 1.063, 1.072, 1.081,
- 1.092, 1.073, 1.058, 1.048, 1.043, 1.038, 1.035, 1.033, 1.033, 1.035, 1.039, 1.044, 1.051, 1.057, 1.069, 1.078,
- 1.091, 1.068, 1.054, 1.045, 1.041, 1.038, 1.035, 1.032, 1.032, 1.032, 1.036, 1.041, 1.045, 1.055, 1.069, 1.078,
- 1.091, 1.068, 1.052, 1.043, 1.041, 1.038, 1.035, 1.032, 1.031, 1.032, 1.034, 1.036, 1.043, 1.055, 1.069, 1.078,
- 1.092, 1.068, 1.052, 1.047, 1.042, 1.041, 1.038, 1.035, 1.032, 1.032, 1.035, 1.039, 1.043, 1.055, 1.071, 1.079,
- 1.092, 1.073, 1.057, 1.051, 1.047, 1.047, 1.044, 1.041, 1.038, 1.038, 1.039, 1.043, 1.051, 1.059, 1.076, 1.083,
- 1.092, 1.081, 1.068, 1.058, 1.056, 1.056, 1.053, 1.052, 1.049, 1.048, 1.048, 1.051, 1.059, 1.066, 1.083, 1.085,
- 1.091, 1.087, 1.081, 1.068, 1.065, 1.064, 1.062, 1.062, 1.061, 1.056, 1.056, 1.056, 1.064, 1.069, 1.084, 1.089,
- 1.091, 1.089, 1.085, 1.079, 1.069, 1.068, 1.067, 1.067, 1.067, 1.063, 1.061, 1.063, 1.068, 1.069, 1.081, 1.092
- ]
- },
- {
- "ct": 5000, "table":
- [
- 1.486, 1.484, 1.468, 1.449, 1.427, 1.403, 1.399, 1.399, 1.399, 1.404, 1.413, 1.433, 1.454, 1.473, 1.482, 1.488,
- 1.484, 1.472, 1.454, 1.431, 1.405, 1.381, 1.365, 1.365, 1.367, 1.373, 1.392, 1.411, 1.438, 1.458, 1.476, 1.481,
- 1.476, 1.458, 1.433, 1.405, 1.381, 1.361, 1.339, 1.334, 1.334, 1.346, 1.362, 1.391, 1.411, 1.438, 1.462, 1.474,
- 1.471, 1.443, 1.417, 1.388, 1.361, 1.339, 1.321, 1.313, 1.313, 1.327, 1.346, 1.362, 1.391, 1.422, 1.453, 1.473,
- 1.469, 1.439, 1.408, 1.377, 1.349, 1.321, 1.312, 1.299, 1.299, 1.311, 1.327, 1.348, 1.378, 1.415, 1.446, 1.468,
- 1.468, 1.434, 1.402, 1.371, 1.341, 1.316, 1.299, 1.296, 1.295, 1.299, 1.314, 1.338, 1.371, 1.408, 1.441, 1.466,
- 1.468, 1.434, 1.401, 1.371, 1.341, 1.316, 1.301, 1.296, 1.295, 1.297, 1.314, 1.338, 1.369, 1.408, 1.441, 1.465,
- 1.469, 1.436, 1.401, 1.374, 1.348, 1.332, 1.315, 1.301, 1.301, 1.313, 1.324, 1.342, 1.372, 1.409, 1.442, 1.465,
- 1.471, 1.444, 1.413, 1.388, 1.371, 1.348, 1.332, 1.323, 1.323, 1.324, 1.342, 1.362, 1.386, 1.418, 1.449, 1.467,
- 1.473, 1.454, 1.431, 1.407, 1.388, 1.371, 1.359, 1.352, 1.351, 1.351, 1.362, 1.383, 1.404, 1.433, 1.462, 1.472,
- 1.474, 1.461, 1.447, 1.424, 1.407, 1.394, 1.385, 1.381, 1.379, 1.381, 1.383, 1.401, 1.419, 1.444, 1.466, 1.481,
- 1.474, 1.464, 1.455, 1.442, 1.421, 1.408, 1.403, 1.403, 1.403, 1.399, 1.402, 1.415, 1.432, 1.446, 1.467, 1.483
- ]
- },
- {
- "ct": 6500, "table":
- [
- 1.567, 1.565, 1.555, 1.541, 1.525, 1.518, 1.518, 1.518, 1.521, 1.527, 1.532, 1.541, 1.551, 1.559, 1.567, 1.569,
- 1.565, 1.557, 1.542, 1.527, 1.519, 1.515, 1.511, 1.516, 1.519, 1.524, 1.528, 1.533, 1.542, 1.553, 1.559, 1.562,
- 1.561, 1.546, 1.532, 1.521, 1.518, 1.515, 1.511, 1.516, 1.519, 1.524, 1.528, 1.529, 1.533, 1.542, 1.554, 1.559,
- 1.561, 1.539, 1.526, 1.524, 1.521, 1.521, 1.522, 1.524, 1.525, 1.531, 1.529, 1.529, 1.531, 1.538, 1.549, 1.558,
- 1.559, 1.538, 1.526, 1.525, 1.524, 1.528, 1.534, 1.536, 1.536, 1.536, 1.532, 1.529, 1.531, 1.537, 1.548, 1.556,
- 1.561, 1.537, 1.525, 1.524, 1.526, 1.532, 1.537, 1.539, 1.538, 1.537, 1.532, 1.529, 1.529, 1.537, 1.546, 1.556,
- 1.561, 1.536, 1.524, 1.522, 1.525, 1.532, 1.538, 1.538, 1.537, 1.533, 1.528, 1.526, 1.527, 1.536, 1.546, 1.555,
- 1.561, 1.537, 1.522, 1.521, 1.524, 1.531, 1.536, 1.537, 1.534, 1.529, 1.526, 1.522, 1.523, 1.534, 1.547, 1.555,
- 1.561, 1.538, 1.524, 1.522, 1.526, 1.531, 1.535, 1.535, 1.534, 1.527, 1.524, 1.522, 1.522, 1.535, 1.549, 1.556,
- 1.558, 1.543, 1.532, 1.526, 1.526, 1.529, 1.534, 1.535, 1.533, 1.526, 1.523, 1.522, 1.524, 1.537, 1.552, 1.557,
- 1.555, 1.546, 1.541, 1.528, 1.527, 1.528, 1.531, 1.533, 1.531, 1.527, 1.522, 1.522, 1.526, 1.536, 1.552, 1.561,
- 1.555, 1.547, 1.542, 1.538, 1.526, 1.526, 1.529, 1.531, 1.529, 1.528, 1.519, 1.519, 1.527, 1.531, 1.543, 1.561
- ]
- }
- ],
- "calibrations_Cb":
- [
- {
- "ct": 3000, "table":
- [
- 1.684, 1.688, 1.691, 1.697, 1.709, 1.722, 1.735, 1.745, 1.747, 1.745, 1.731, 1.719, 1.709, 1.705, 1.699, 1.699,
- 1.684, 1.689, 1.694, 1.708, 1.721, 1.735, 1.747, 1.762, 1.762, 1.758, 1.745, 1.727, 1.716, 1.707, 1.701, 1.699,
- 1.684, 1.691, 1.704, 1.719, 1.734, 1.755, 1.772, 1.786, 1.789, 1.788, 1.762, 1.745, 1.724, 1.709, 1.702, 1.698,
- 1.682, 1.694, 1.709, 1.729, 1.755, 1.773, 1.798, 1.815, 1.817, 1.808, 1.788, 1.762, 1.733, 1.714, 1.704, 1.699,
- 1.682, 1.693, 1.713, 1.742, 1.772, 1.798, 1.815, 1.829, 1.831, 1.821, 1.807, 1.773, 1.742, 1.716, 1.703, 1.699,
- 1.681, 1.693, 1.713, 1.742, 1.772, 1.799, 1.828, 1.839, 1.839, 1.828, 1.807, 1.774, 1.742, 1.715, 1.699, 1.695,
- 1.679, 1.691, 1.712, 1.739, 1.771, 1.798, 1.825, 1.829, 1.831, 1.818, 1.801, 1.774, 1.738, 1.712, 1.695, 1.691,
- 1.676, 1.685, 1.703, 1.727, 1.761, 1.784, 1.801, 1.817, 1.817, 1.801, 1.779, 1.761, 1.729, 1.706, 1.691, 1.684,
- 1.669, 1.678, 1.692, 1.714, 1.741, 1.764, 1.784, 1.795, 1.795, 1.779, 1.761, 1.738, 1.713, 1.696, 1.683, 1.679,
- 1.664, 1.671, 1.679, 1.693, 1.716, 1.741, 1.762, 1.769, 1.769, 1.753, 1.738, 1.713, 1.701, 1.687, 1.681, 1.676,
- 1.661, 1.664, 1.671, 1.679, 1.693, 1.714, 1.732, 1.739, 1.739, 1.729, 1.708, 1.701, 1.685, 1.679, 1.676, 1.677,
- 1.659, 1.661, 1.664, 1.671, 1.679, 1.693, 1.712, 1.714, 1.714, 1.708, 1.701, 1.687, 1.679, 1.672, 1.673, 1.677
- ]
- },
- {
- "ct": 5000, "table":
- [
- 1.177, 1.183, 1.187, 1.191, 1.197, 1.206, 1.213, 1.215, 1.215, 1.215, 1.211, 1.204, 1.196, 1.191, 1.183, 1.182,
- 1.179, 1.185, 1.191, 1.196, 1.206, 1.217, 1.224, 1.229, 1.229, 1.226, 1.221, 1.212, 1.202, 1.195, 1.188, 1.182,
- 1.183, 1.191, 1.196, 1.206, 1.217, 1.229, 1.239, 1.245, 1.245, 1.245, 1.233, 1.221, 1.212, 1.199, 1.193, 1.187,
- 1.183, 1.192, 1.201, 1.212, 1.229, 1.241, 1.252, 1.259, 1.259, 1.257, 1.245, 1.233, 1.217, 1.201, 1.194, 1.192,
- 1.183, 1.192, 1.202, 1.219, 1.238, 1.252, 1.261, 1.269, 1.268, 1.261, 1.257, 1.241, 1.223, 1.204, 1.194, 1.191,
- 1.182, 1.192, 1.202, 1.219, 1.239, 1.255, 1.266, 1.271, 1.271, 1.265, 1.258, 1.242, 1.223, 1.205, 1.192, 1.191,
- 1.181, 1.189, 1.199, 1.218, 1.239, 1.254, 1.262, 1.268, 1.268, 1.258, 1.253, 1.241, 1.221, 1.204, 1.191, 1.187,
- 1.179, 1.184, 1.193, 1.211, 1.232, 1.243, 1.254, 1.257, 1.256, 1.253, 1.242, 1.232, 1.216, 1.199, 1.187, 1.183,
- 1.174, 1.179, 1.187, 1.202, 1.218, 1.232, 1.243, 1.246, 1.246, 1.239, 1.232, 1.218, 1.207, 1.191, 1.183, 1.179,
- 1.169, 1.175, 1.181, 1.189, 1.202, 1.218, 1.229, 1.232, 1.232, 1.224, 1.218, 1.207, 1.199, 1.185, 1.181, 1.174,
- 1.164, 1.168, 1.175, 1.179, 1.189, 1.201, 1.209, 1.213, 1.213, 1.209, 1.201, 1.198, 1.186, 1.181, 1.174, 1.173,
- 1.161, 1.166, 1.171, 1.175, 1.179, 1.189, 1.197, 1.198, 1.198, 1.197, 1.196, 1.186, 1.182, 1.175, 1.173, 1.173
- ]
- },
- {
- "ct": 6500, "table":
- [
- 1.166, 1.171, 1.173, 1.178, 1.187, 1.193, 1.201, 1.205, 1.205, 1.205, 1.199, 1.191, 1.184, 1.179, 1.174, 1.171,
- 1.166, 1.172, 1.176, 1.184, 1.195, 1.202, 1.209, 1.216, 1.216, 1.213, 1.208, 1.201, 1.189, 1.182, 1.176, 1.171,
- 1.166, 1.173, 1.183, 1.195, 1.202, 1.214, 1.221, 1.228, 1.229, 1.228, 1.221, 1.209, 1.201, 1.186, 1.179, 1.174,
- 1.165, 1.174, 1.187, 1.201, 1.214, 1.223, 1.235, 1.241, 1.242, 1.241, 1.229, 1.221, 1.205, 1.188, 1.181, 1.177,
- 1.165, 1.174, 1.189, 1.207, 1.223, 1.235, 1.242, 1.253, 1.252, 1.245, 1.241, 1.228, 1.211, 1.189, 1.181, 1.178,
- 1.164, 1.173, 1.189, 1.207, 1.224, 1.238, 1.249, 1.255, 1.255, 1.249, 1.242, 1.228, 1.211, 1.191, 1.179, 1.176,
- 1.163, 1.172, 1.187, 1.207, 1.223, 1.237, 1.245, 1.253, 1.252, 1.243, 1.237, 1.228, 1.207, 1.188, 1.176, 1.173,
- 1.159, 1.167, 1.179, 1.199, 1.217, 1.227, 1.237, 1.241, 1.241, 1.237, 1.228, 1.217, 1.201, 1.184, 1.174, 1.169,
- 1.156, 1.164, 1.172, 1.189, 1.205, 1.217, 1.226, 1.229, 1.229, 1.222, 1.217, 1.204, 1.192, 1.177, 1.171, 1.166,
- 1.154, 1.159, 1.166, 1.177, 1.189, 1.205, 1.213, 1.216, 1.216, 1.209, 1.204, 1.192, 1.183, 1.172, 1.168, 1.162,
- 1.152, 1.155, 1.161, 1.166, 1.177, 1.188, 1.195, 1.198, 1.199, 1.196, 1.187, 1.183, 1.173, 1.168, 1.163, 1.162,
- 1.151, 1.154, 1.158, 1.162, 1.168, 1.177, 1.183, 1.184, 1.184, 1.184, 1.182, 1.172, 1.168, 1.165, 1.162, 1.161
- ]
- }
- ],
- "luminance_lut":
- [
- 2.236, 2.111, 1.912, 1.741, 1.579, 1.451, 1.379, 1.349, 1.349, 1.361, 1.411, 1.505, 1.644, 1.816, 2.034, 2.159,
- 2.139, 1.994, 1.796, 1.625, 1.467, 1.361, 1.285, 1.248, 1.239, 1.265, 1.321, 1.408, 1.536, 1.703, 1.903, 2.087,
- 2.047, 1.898, 1.694, 1.511, 1.373, 1.254, 1.186, 1.152, 1.142, 1.166, 1.226, 1.309, 1.441, 1.598, 1.799, 1.978,
- 1.999, 1.824, 1.615, 1.429, 1.281, 1.179, 1.113, 1.077, 1.071, 1.096, 1.153, 1.239, 1.357, 1.525, 1.726, 1.915,
- 1.976, 1.773, 1.563, 1.374, 1.222, 1.119, 1.064, 1.032, 1.031, 1.049, 1.099, 1.188, 1.309, 1.478, 1.681, 1.893,
- 1.973, 1.756, 1.542, 1.351, 1.196, 1.088, 1.028, 1.011, 1.004, 1.029, 1.077, 1.169, 1.295, 1.459, 1.663, 1.891,
- 1.973, 1.761, 1.541, 1.349, 1.193, 1.087, 1.031, 1.006, 1.006, 1.023, 1.075, 1.169, 1.298, 1.463, 1.667, 1.891,
- 1.982, 1.789, 1.568, 1.373, 1.213, 1.111, 1.051, 1.029, 1.024, 1.053, 1.106, 1.199, 1.329, 1.495, 1.692, 1.903,
- 2.015, 1.838, 1.621, 1.426, 1.268, 1.159, 1.101, 1.066, 1.068, 1.099, 1.166, 1.259, 1.387, 1.553, 1.751, 1.937,
- 2.076, 1.911, 1.692, 1.507, 1.346, 1.236, 1.169, 1.136, 1.139, 1.174, 1.242, 1.349, 1.475, 1.641, 1.833, 2.004,
- 2.193, 2.011, 1.798, 1.604, 1.444, 1.339, 1.265, 1.235, 1.237, 1.273, 1.351, 1.461, 1.598, 1.758, 1.956, 2.125,
- 2.263, 2.154, 1.916, 1.711, 1.549, 1.432, 1.372, 1.356, 1.356, 1.383, 1.455, 1.578, 1.726, 1.914, 2.119, 2.211
- ],
- "sigma": 0.006,
- "sigma_Cb": 0.00208
- },
- "rpi.contrast":
- {
- "ce_enable": 1,
- "gamma_curve":
- [
- 0, 0, 1024, 5040, 2048, 9338, 3072, 12356, 4096, 15312, 5120, 18051, 6144, 20790, 7168, 23193,
- 8192, 25744, 9216, 27942, 10240, 30035, 11264, 32005, 12288, 33975, 13312, 35815, 14336, 37600, 15360, 39168,
- 16384, 40642, 18432, 43379, 20480, 45749, 22528, 47753, 24576, 49621, 26624, 51253, 28672, 52698, 30720, 53796,
- 32768, 54876, 36864, 57012, 40960, 58656, 45056, 59954, 49152, 61183, 53248, 62355, 57344, 63419, 61440, 64476,
- 65535, 65535
- ]
- },
- "rpi.ccm":
- {
- "ccms":
- [
- {
- "ct": 2500, "ccm":
- [
- 1.70741, -0.05307, -0.65433, -0.62822, 1.68836, -0.06014, -0.04452, -1.87628, 2.92079
- ]
- },
- {
- "ct": 2803, "ccm":
- [
- 1.74383, -0.18731, -0.55652, -0.56491, 1.67772, -0.11281, -0.01522, -1.60635, 2.62157
- ]
- },
- {
- "ct": 2912, "ccm":
- [
- 1.75215, -0.22221, -0.52995, -0.54568, 1.63522, -0.08954, 0.02633, -1.56997, 2.54364
- ]
- },
- {
- "ct": 2914, "ccm":
- [
- 1.72423, -0.28939, -0.43484, -0.55188, 1.62925, -0.07737, 0.01959, -1.28661, 2.26702
- ]
- },
- {
- "ct": 3605, "ccm":
- [
- 1.80381, -0.43646, -0.36735, -0.46505, 1.56814, -0.10309, 0.00929, -1.00424, 1.99495
- ]
- },
- {
- "ct": 4540, "ccm":
- [
- 1.85263, -0.46545, -0.38719, -0.44136, 1.68443, -0.24307, 0.04108, -0.85599, 1.81491
- ]
- },
- {
- "ct": 5699, "ccm":
- [
- 1.98595, -0.63542, -0.35054, -0.34623, 1.54146, -0.19522, 0.00411, -0.70936, 1.70525
- ]
- },
- {
- "ct": 8625, "ccm":
- [
- 2.21637, -0.56663, -0.64974, -0.41133, 1.96625, -0.55492, -0.02307, -0.83529, 1.85837
- ]
- }
- ]
- },
- "rpi.sharpen":
- {
-
- }
-}
diff --git a/src/ipa/raspberrypi/data/ov9281.json b/src/ipa/raspberrypi/data/ov9281.json
deleted file mode 100644
index 2319448b..00000000
--- a/src/ipa/raspberrypi/data/ov9281.json
+++ /dev/null
@@ -1,92 +0,0 @@
-{
- "rpi.black_level":
- {
- "black_level": 4096
- },
- "rpi.lux":
- {
- "reference_shutter_speed": 2000,
- "reference_gain": 1.0,
- "reference_aperture": 1.0,
- "reference_lux": 800,
- "reference_Y": 20000
- },
- "rpi.noise":
- {
- "reference_constant": 0,
- "reference_slope": 2.5
- },
- "rpi.sdn":
- {
- },
- "rpi.agc":
- {
- "metering_modes":
- {
- "centre-weighted": {
- "weights": [4, 4, 4, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0]
- }
- },
- "exposure_modes":
- {
- "normal":
- {
- "shutter": [ 100, 15000, 30000, 60000, 120000 ],
- "gain": [ 1.0, 2.0, 3.0, 4.0, 8.0 ]
- }
- },
- "constraint_modes":
- {
- "normal":
- [
- { "bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target": [ 0, 0.4, 1000, 0.4 ] }
- ]
- },
- "y_target": [ 0, 0.16, 1000, 0.165, 10000, 0.17 ]
- },
- "rpi.alsc":
- {
- "n_iter": 0,
- "luminance_strength": 1.0,
- "corner_strength": 1.5
- },
- "rpi.contrast":
- {
- "ce_enable": 0,
- "gamma_curve": [
- 0, 0,
- 1024, 5040,
- 2048, 9338,
- 3072, 12356,
- 4096, 15312,
- 5120, 18051,
- 6144, 20790,
- 7168, 23193,
- 8192, 25744,
- 9216, 27942,
- 10240, 30035,
- 11264, 32005,
- 12288, 33975,
- 13312, 35815,
- 14336, 37600,
- 15360, 39168,
- 16384, 40642,
- 18432, 43379,
- 20480, 45749,
- 22528, 47753,
- 24576, 49621,
- 26624, 51253,
- 28672, 52698,
- 30720, 53796,
- 32768, 54876,
- 36864, 57012,
- 40960, 58656,
- 45056, 59954,
- 49152, 61183,
- 53248, 62355,
- 57344, 63419,
- 61440, 64476,
- 65535, 65535
- ]
- }
-}
diff --git a/src/ipa/raspberrypi/data/se327m12.json b/src/ipa/raspberrypi/data/se327m12.json
deleted file mode 100644
index 94af2239..00000000
--- a/src/ipa/raspberrypi/data/se327m12.json
+++ /dev/null
@@ -1,341 +0,0 @@
-{
- "rpi.black_level":
- {
- "black_level": 3840
- },
- "rpi.dpc":
- {
- },
- "rpi.lux":
- {
- "reference_shutter_speed": 6873,
- "reference_gain": 1.0,
- "reference_aperture": 1.0,
- "reference_lux": 800,
- "reference_Y": 12293
- },
- "rpi.noise":
- {
- "reference_constant": 0,
- "reference_slope": 1.986
- },
- "rpi.geq":
- {
- "offset": 207,
- "slope": 0.00539
- },
- "rpi.sdn":
- {
- },
- "rpi.awb":
- {
- "priors":
- [
- {
- "lux": 0, "prior":
- [
- 2000, 1.0, 3000, 0.0, 13000, 0.0
- ]
- },
- {
- "lux": 800, "prior":
- [
- 2000, 0.0, 6000, 2.0, 13000, 2.0
- ]
- },
- {
- "lux": 1500, "prior":
- [
- 2000, 0.0, 4000, 1.0, 6000, 6.0, 6500, 7.0, 7000, 1.0, 13000, 1.0
- ]
- }
- ],
- "modes":
- {
- "auto":
- {
- "lo": 2500,
- "hi": 8000
- },
- "incandescent":
- {
- "lo": 2500,
- "hi": 3000
- },
- "tungsten":
- {
- "lo": 3000,
- "hi": 3500
- },
- "fluorescent":
- {
- "lo": 4000,
- "hi": 4700
- },
- "indoor":
- {
- "lo": 3000,
- "hi": 5000
- },
- "daylight":
- {
- "lo": 5500,
- "hi": 6500
- },
- "cloudy":
- {
- "lo": 7000,
- "hi": 8600
- }
- },
- "bayes": 1,
- "ct_curve":
- [
- 2900.0, 0.9217, 0.3657, 3600.0, 0.7876, 0.4651, 4600.0, 0.6807, 0.5684, 5800.0, 0.5937, 0.6724, 8100.0, 0.5447, 0.7403
- ],
- "sensitivity_r": 1.0,
- "sensitivity_b": 1.0,
- "transverse_pos": 0.0162,
- "transverse_neg": 0.0204
- },
- "rpi.agc":
- {
- "metering_modes":
- {
- "centre-weighted":
- {
- "weights":
- [
- 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
- ]
- },
- "spot":
- {
- "weights":
- [
- 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
- ]
- },
- "matrix":
- {
- "weights":
- [
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
- ]
- }
- },
- "exposure_modes":
- {
- "normal":
- {
- "shutter":
- [
- 100, 10000, 30000, 60000, 120000
- ],
- "gain":
- [
- 1.0, 2.0, 4.0, 6.0, 8.0
- ]
- },
- "short":
- {
- "shutter":
- [
- 100, 5000, 10000, 20000, 120000
- ],
- "gain":
- [
- 1.0, 2.0, 4.0, 6.0, 8.0
- ]
- }
- },
- "constraint_modes":
- {
- "normal":
- [
- {
- "bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target":
- [
- 0, 0.5, 1000, 0.5
- ]
- }
- ],
- "highlight":
- [
- {
- "bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target":
- [
- 0, 0.5, 1000, 0.5
- ]
- },
- {
- "bound": "UPPER", "q_lo": 0.98, "q_hi": 1.0, "y_target":
- [
- 0, 0.8, 1000, 0.8
- ]
- }
- ]
- },
- "y_target":
- [
- 0, 0.16, 1000, 0.165, 10000, 0.17
- ]
- },
- "rpi.alsc":
- {
- "omega": 1.3,
- "n_iter": 100,
- "luminance_strength": 0.5,
- "calibrations_Cr":
- [
- {
- "ct": 4000, "table":
- [
- 1.481, 1.471, 1.449, 1.429, 1.416, 1.404, 1.394, 1.389, 1.389, 1.389, 1.392, 1.397, 1.404, 1.416, 1.429, 1.437,
- 1.472, 1.456, 1.436, 1.418, 1.405, 1.394, 1.389, 1.384, 1.382, 1.382, 1.386, 1.388, 1.398, 1.407, 1.422, 1.429,
- 1.465, 1.443, 1.426, 1.411, 1.397, 1.389, 1.383, 1.377, 1.377, 1.377, 1.379, 1.384, 1.388, 1.398, 1.411, 1.422,
- 1.462, 1.441, 1.423, 1.409, 1.395, 1.385, 1.379, 1.376, 1.374, 1.374, 1.375, 1.379, 1.384, 1.394, 1.407, 1.418,
- 1.461, 1.439, 1.421, 1.407, 1.394, 1.385, 1.381, 1.376, 1.373, 1.373, 1.373, 1.376, 1.381, 1.389, 1.403, 1.415,
- 1.461, 1.439, 1.419, 1.404, 1.392, 1.384, 1.379, 1.376, 1.373, 1.372, 1.374, 1.375, 1.379, 1.389, 1.401, 1.413,
- 1.461, 1.438, 1.419, 1.402, 1.389, 1.383, 1.377, 1.375, 1.373, 1.372, 1.372, 1.375, 1.381, 1.388, 1.401, 1.414,
- 1.462, 1.438, 1.419, 1.403, 1.391, 1.381, 1.377, 1.374, 1.373, 1.373, 1.374, 1.376, 1.381, 1.389, 1.401, 1.414,
- 1.462, 1.441, 1.423, 1.405, 1.392, 1.383, 1.377, 1.374, 1.373, 1.372, 1.373, 1.376, 1.382, 1.391, 1.402, 1.414,
- 1.465, 1.444, 1.424, 1.407, 1.393, 1.382, 1.378, 1.373, 1.369, 1.369, 1.372, 1.375, 1.381, 1.389, 1.402, 1.417,
- 1.469, 1.449, 1.427, 1.413, 1.396, 1.384, 1.381, 1.375, 1.371, 1.371, 1.373, 1.377, 1.385, 1.393, 1.407, 1.422,
- 1.474, 1.456, 1.436, 1.419, 1.407, 1.391, 1.383, 1.379, 1.377, 1.377, 1.378, 1.381, 1.391, 1.404, 1.422, 1.426
- ]
- },
- {
- "ct": 5000, "table":
- [
- 1.742, 1.721, 1.689, 1.661, 1.639, 1.623, 1.613, 1.609, 1.607, 1.606, 1.609, 1.617, 1.626, 1.641, 1.665, 1.681,
- 1.728, 1.703, 1.672, 1.645, 1.631, 1.614, 1.602, 1.599, 1.596, 1.597, 1.601, 1.608, 1.618, 1.631, 1.653, 1.671,
- 1.713, 1.691, 1.658, 1.635, 1.618, 1.606, 1.595, 1.591, 1.588, 1.588, 1.591, 1.601, 1.608, 1.624, 1.641, 1.658,
- 1.707, 1.681, 1.651, 1.627, 1.613, 1.599, 1.591, 1.585, 1.583, 1.584, 1.587, 1.591, 1.601, 1.615, 1.633, 1.655,
- 1.699, 1.672, 1.644, 1.622, 1.606, 1.593, 1.586, 1.581, 1.579, 1.581, 1.583, 1.587, 1.597, 1.611, 1.631, 1.652,
- 1.697, 1.665, 1.637, 1.617, 1.601, 1.589, 1.584, 1.579, 1.577, 1.578, 1.581, 1.585, 1.597, 1.607, 1.627, 1.652,
- 1.697, 1.662, 1.634, 1.613, 1.599, 1.591, 1.583, 1.578, 1.576, 1.576, 1.579, 1.586, 1.597, 1.607, 1.628, 1.653,
- 1.697, 1.662, 1.633, 1.613, 1.598, 1.589, 1.582, 1.578, 1.576, 1.577, 1.582, 1.589, 1.598, 1.611, 1.635, 1.655,
- 1.701, 1.666, 1.636, 1.616, 1.602, 1.589, 1.583, 1.578, 1.577, 1.581, 1.583, 1.591, 1.601, 1.617, 1.639, 1.659,
- 1.708, 1.671, 1.641, 1.618, 1.603, 1.591, 1.584, 1.581, 1.578, 1.581, 1.585, 1.594, 1.604, 1.622, 1.646, 1.666,
- 1.714, 1.681, 1.648, 1.622, 1.608, 1.599, 1.591, 1.584, 1.583, 1.584, 1.589, 1.599, 1.614, 1.629, 1.653, 1.673,
- 1.719, 1.691, 1.659, 1.631, 1.618, 1.606, 1.596, 1.591, 1.591, 1.593, 1.599, 1.608, 1.623, 1.642, 1.665, 1.681
- ]
- }
- ],
- "calibrations_Cb":
- [
- {
- "ct": 4000, "table":
- [
- 2.253, 2.267, 2.289, 2.317, 2.342, 2.359, 2.373, 2.381, 2.381, 2.378, 2.368, 2.361, 2.344, 2.337, 2.314, 2.301,
- 2.262, 2.284, 2.314, 2.335, 2.352, 2.371, 2.383, 2.391, 2.393, 2.391, 2.381, 2.368, 2.361, 2.342, 2.322, 2.308,
- 2.277, 2.303, 2.321, 2.346, 2.364, 2.381, 2.391, 2.395, 2.397, 2.397, 2.395, 2.381, 2.367, 2.354, 2.332, 2.321,
- 2.277, 2.304, 2.327, 2.349, 2.369, 2.388, 2.393, 2.396, 2.396, 2.398, 2.396, 2.391, 2.376, 2.359, 2.339, 2.328,
- 2.279, 2.311, 2.327, 2.354, 2.377, 2.389, 2.393, 2.397, 2.397, 2.398, 2.395, 2.393, 2.382, 2.363, 2.344, 2.332,
- 2.282, 2.311, 2.329, 2.354, 2.377, 2.386, 2.396, 2.396, 2.395, 2.396, 2.397, 2.394, 2.383, 2.367, 2.346, 2.333,
- 2.283, 2.314, 2.333, 2.353, 2.375, 2.389, 2.394, 2.395, 2.395, 2.395, 2.396, 2.394, 2.386, 2.368, 2.354, 2.336,
- 2.287, 2.309, 2.331, 2.352, 2.373, 2.386, 2.394, 2.395, 2.395, 2.396, 2.396, 2.394, 2.384, 2.371, 2.354, 2.339,
- 2.289, 2.307, 2.326, 2.347, 2.369, 2.385, 2.392, 2.397, 2.398, 2.398, 2.397, 2.392, 2.383, 2.367, 2.352, 2.337,
- 2.286, 2.303, 2.322, 2.342, 2.361, 2.379, 2.389, 2.394, 2.397, 2.398, 2.396, 2.389, 2.381, 2.366, 2.346, 2.332,
- 2.284, 2.291, 2.312, 2.329, 2.351, 2.372, 2.381, 2.389, 2.393, 2.394, 2.389, 2.385, 2.374, 2.362, 2.338, 2.325,
- 2.283, 2.288, 2.305, 2.319, 2.339, 2.365, 2.374, 2.381, 2.384, 2.386, 2.385, 2.379, 2.368, 2.342, 2.325, 2.318
- ]
- },
- {
- "ct": 5000, "table":
- [
- 1.897, 1.919, 1.941, 1.969, 1.989, 2.003, 2.014, 2.019, 2.019, 2.017, 2.014, 2.008, 1.999, 1.988, 1.968, 1.944,
- 1.914, 1.932, 1.957, 1.982, 1.998, 2.014, 2.023, 2.029, 2.031, 2.029, 2.022, 2.014, 2.006, 1.995, 1.976, 1.955,
- 1.925, 1.951, 1.974, 1.996, 2.013, 2.027, 2.035, 2.039, 2.039, 2.038, 2.035, 2.026, 2.015, 2.002, 1.984, 1.963,
- 1.932, 1.958, 1.986, 2.007, 2.024, 2.034, 2.041, 2.041, 2.045, 2.045, 2.042, 2.033, 2.023, 2.009, 1.995, 1.971,
- 1.942, 1.964, 1.994, 2.012, 2.029, 2.038, 2.043, 2.046, 2.047, 2.046, 2.045, 2.039, 2.029, 2.014, 1.997, 1.977,
- 1.946, 1.974, 1.999, 2.015, 2.031, 2.041, 2.046, 2.047, 2.048, 2.047, 2.044, 2.041, 2.031, 2.019, 1.999, 1.978,
- 1.948, 1.975, 2.002, 2.018, 2.031, 2.041, 2.046, 2.047, 2.048, 2.048, 2.045, 2.041, 2.029, 2.019, 1.998, 1.978,
- 1.948, 1.973, 2.002, 2.018, 2.029, 2.042, 2.045, 2.048, 2.048, 2.048, 2.044, 2.037, 2.027, 2.014, 1.993, 1.978,
- 1.945, 1.969, 1.998, 2.015, 2.028, 2.037, 2.045, 2.046, 2.047, 2.044, 2.039, 2.033, 2.022, 2.008, 1.989, 1.971,
- 1.939, 1.964, 1.991, 2.011, 2.024, 2.032, 2.036, 2.042, 2.042, 2.039, 2.035, 2.024, 2.012, 1.998, 1.977, 1.964,
- 1.932, 1.953, 1.981, 2.006, 2.016, 2.024, 2.028, 2.031, 2.034, 2.031, 2.024, 2.015, 2.005, 1.989, 1.966, 1.955,
- 1.928, 1.944, 1.973, 1.999, 2.007, 2.016, 2.019, 2.025, 2.026, 2.025, 2.017, 2.008, 1.997, 1.975, 1.958, 1.947
- ]
- }
- ],
- "luminance_lut":
- [
- 1.877, 1.597, 1.397, 1.269, 1.191, 1.131, 1.093, 1.078, 1.071, 1.069, 1.086, 1.135, 1.221, 1.331, 1.474, 1.704,
- 1.749, 1.506, 1.334, 1.229, 1.149, 1.088, 1.058, 1.053, 1.051, 1.046, 1.053, 1.091, 1.163, 1.259, 1.387, 1.587,
- 1.661, 1.451, 1.295, 1.195, 1.113, 1.061, 1.049, 1.048, 1.047, 1.049, 1.049, 1.066, 1.124, 1.211, 1.333, 1.511,
- 1.615, 1.411, 1.267, 1.165, 1.086, 1.052, 1.047, 1.047, 1.047, 1.049, 1.052, 1.056, 1.099, 1.181, 1.303, 1.471,
- 1.576, 1.385, 1.252, 1.144, 1.068, 1.049, 1.044, 1.044, 1.045, 1.049, 1.053, 1.054, 1.083, 1.163, 1.283, 1.447,
- 1.561, 1.373, 1.245, 1.135, 1.064, 1.049, 1.044, 1.044, 1.044, 1.046, 1.048, 1.054, 1.073, 1.153, 1.271, 1.432,
- 1.571, 1.377, 1.242, 1.137, 1.066, 1.055, 1.052, 1.051, 1.051, 1.049, 1.047, 1.048, 1.068, 1.148, 1.271, 1.427,
- 1.582, 1.396, 1.259, 1.156, 1.085, 1.068, 1.059, 1.054, 1.049, 1.045, 1.041, 1.043, 1.074, 1.157, 1.284, 1.444,
- 1.623, 1.428, 1.283, 1.178, 1.105, 1.074, 1.069, 1.063, 1.056, 1.048, 1.046, 1.051, 1.094, 1.182, 1.311, 1.473,
- 1.691, 1.471, 1.321, 1.213, 1.135, 1.088, 1.073, 1.069, 1.063, 1.059, 1.053, 1.071, 1.129, 1.222, 1.351, 1.521,
- 1.808, 1.543, 1.371, 1.253, 1.174, 1.118, 1.085, 1.072, 1.067, 1.064, 1.071, 1.106, 1.176, 1.274, 1.398, 1.582,
- 1.969, 1.666, 1.447, 1.316, 1.223, 1.166, 1.123, 1.094, 1.089, 1.097, 1.118, 1.163, 1.239, 1.336, 1.471, 1.681
- ],
- "sigma": 0.00218,
- "sigma_Cb": 0.00194
- },
- "rpi.contrast":
- {
- "ce_enable": 1,
- "gamma_curve":
- [
- 0, 0, 1024, 5040, 2048, 9338, 3072, 12356, 4096, 15312, 5120, 18051, 6144, 20790, 7168, 23193,
- 8192, 25744, 9216, 27942, 10240, 30035, 11264, 32005, 12288, 33975, 13312, 35815, 14336, 37600, 15360, 39168,
- 16384, 40642, 18432, 43379, 20480, 45749, 22528, 47753, 24576, 49621, 26624, 51253, 28672, 52698, 30720, 53796,
- 32768, 54876, 36864, 57012, 40960, 58656, 45056, 59954, 49152, 61183, 53248, 62355, 57344, 63419, 61440, 64476,
- 65535, 65535
- ]
- },
- "rpi.ccm":
- {
- "ccms":
- [
- {
- "ct": 2900, "ccm":
- [
- 1.44924, -0.12935, -0.31989, -0.65839, 1.95441, -0.29602, 0.18344, -1.22282, 2.03938
- ]
- },
- {
- "ct": 3000, "ccm":
- [
- 1.38736, 0.07714, -0.46451, -0.59691, 1.84335, -0.24644, 0.10092, -1.30441, 2.20349
- ]
- },
- {
- "ct": 3600, "ccm":
- [
- 1.51261, -0.27921, -0.23339, -0.55129, 1.83241, -0.28111, 0.11649, -0.93195, 1.81546
- ]
- },
- {
- "ct": 4600, "ccm":
- [
- 1.47082, -0.18523, -0.28559, -0.48923, 1.95126, -0.46203, 0.07951, -0.83987, 1.76036
- ]
- },
- {
- "ct": 5800, "ccm":
- [
- 1.57294, -0.36229, -0.21065, -0.42272, 1.80305, -0.38032, 0.03671, -0.66862, 1.63191
- ]
- },
- {
- "ct": 8100, "ccm":
- [
- 1.58803, -0.09912, -0.48891, -0.42594, 2.22303, -0.79709, -0.00621, -0.90516, 1.91137
- ]
- }
- ]
- },
- "rpi.sharpen":
- {
- "threshold": 2.0,
- "strength": 0.5,
- "limit": 0.5
- }
-}
diff --git a/src/ipa/raspberrypi/data/uncalibrated.json b/src/ipa/raspberrypi/data/uncalibrated.json
deleted file mode 100644
index 16a01e94..00000000
--- a/src/ipa/raspberrypi/data/uncalibrated.json
+++ /dev/null
@@ -1,82 +0,0 @@
-{
- "rpi.black_level":
- {
- "black_level": 4096
- },
- "rpi.awb":
- {
- "use_derivatives": 0,
- "bayes": 0
- },
- "rpi.agc":
- {
- "metering_modes":
- {
- "centre-weighted": {
- "weights": [4, 4, 4, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0]
- }
- },
- "exposure_modes":
- {
- "normal":
- {
- "shutter": [ 100, 15000, 30000, 60000, 120000 ],
- "gain": [ 1.0, 2.0, 3.0, 4.0, 6.0 ]
- }
- },
- "constraint_modes":
- {
- "normal":
- [
- { "bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target": [ 0, 0.4, 1000, 0.4 ] }
- ]
- },
- "y_target": [ 0, 0.16, 1000, 0.165, 10000, 0.17 ]
- },
- "rpi.ccm":
- {
- "ccms":
- [
- { "ct": 4000, "ccm": [ 2.0, -1.0, 0.0, -0.5, 2.0, -0.5, 0, -1.0, 2.0 ] }
- ]
- },
- "rpi.contrast":
- {
- "ce_enable": 0,
- "gamma_curve": [
- 0, 0,
- 1024, 5040,
- 2048, 9338,
- 3072, 12356,
- 4096, 15312,
- 5120, 18051,
- 6144, 20790,
- 7168, 23193,
- 8192, 25744,
- 9216, 27942,
- 10240, 30035,
- 11264, 32005,
- 12288, 33975,
- 13312, 35815,
- 14336, 37600,
- 15360, 39168,
- 16384, 40642,
- 18432, 43379,
- 20480, 45749,
- 22528, 47753,
- 24576, 49621,
- 26624, 51253,
- 28672, 52698,
- 30720, 53796,
- 32768, 54876,
- 36864, 57012,
- 40960, 58656,
- 45056, 59954,
- 49152, 61183,
- 53248, 62355,
- 57344, 63419,
- 61440, 64476,
- 65535, 65535
- ]
- }
-}
diff --git a/src/ipa/raspberrypi/md_parser_smia.cpp b/src/ipa/raspberrypi/md_parser_smia.cpp
deleted file mode 100644
index ea5eac41..00000000
--- a/src/ipa/raspberrypi/md_parser_smia.cpp
+++ /dev/null
@@ -1,149 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019-2021, Raspberry Pi (Trading) Limited
- *
- * md_parser_smia.cpp - SMIA specification based embedded data parser
- */
-
-#include <libcamera/base/log.h>
-#include "md_parser.hpp"
-
-using namespace RPiController;
-using namespace libcamera;
-
-/*
- * This function goes through the embedded data to find the offsets (not
- * values!), in the data block, where the values of the given registers can
- * subsequently be found.
- *
- * Embedded data tag bytes, from Sony IMX219 datasheet but general to all SMIA
- * sensors, I think.
- */
-
-constexpr unsigned int LINE_START = 0x0a;
-constexpr unsigned int LINE_END_TAG = 0x07;
-constexpr unsigned int REG_HI_BITS = 0xaa;
-constexpr unsigned int REG_LOW_BITS = 0xa5;
-constexpr unsigned int REG_VALUE = 0x5a;
-constexpr unsigned int REG_SKIP = 0x55;
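-
-/*
- * Within an embedded data line, registers are described by (tag, value) byte
- * pairs: REG_HI_BITS and REG_LOW_BITS update the high and low bytes of the
- * current register address, REG_VALUE supplies the value of that register and
- * advances the address, and REG_SKIP advances the address without supplying a
- * value. Each line begins with LINE_START and ends with a LINE_END_TAG pair,
- * as handled by findRegs() below.
- */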
-
-MdParserSmia::MdParserSmia(std::initializer_list<uint32_t> registerList)
-{
- for (auto r : registerList)
- offsets_[r] = {};
-}
-
-MdParser::Status MdParserSmia::Parse(libcamera::Span<const uint8_t> buffer,
- RegisterMap &registers)
-{
- if (reset_) {
- /*
- * Search again through the metadata for all the registers
- * requested.
- */
- ASSERT(bits_per_pixel_);
-
- for (const auto &kv : offsets_)
- offsets_[kv.first] = {};
-
- ParseStatus ret = findRegs(buffer);
- /*
- * > 0 means "worked partially but parse again next time",
- * < 0 means "hard error".
- *
- * In either case, we retry parsing on the next frame.
- */
- if (ret != PARSE_OK)
- return ERROR;
-
- reset_ = false;
- }
-
- /* Populate the register values requested. */
- registers.clear();
- for (const auto &[reg, offset] : offsets_) {
- if (!offset) {
- reset_ = true;
- return NOTFOUND;
- }
- registers[reg] = buffer[offset.value()];
- }
-
- return OK;
-}
-
-MdParserSmia::ParseStatus MdParserSmia::findRegs(libcamera::Span<const uint8_t> buffer)
-{
- ASSERT(offsets_.size());
-
- if (buffer[0] != LINE_START)
- return NO_LINE_START;
-
- unsigned int current_offset = 1; /* after the LINE_START */
- unsigned int current_line_start = 0, current_line = 0;
- unsigned int reg_num = 0, regs_done = 0;
-
- while (1) {
- int tag = buffer[current_offset++];
-
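- /*
- * In 10-bit readout every fifth byte of a line, and in 12-bit readout
- * every third byte, is a packing (padding) byte rather than metadata,
- * and is expected to carry the REG_SKIP marker.
- */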
- if ((bits_per_pixel_ == 10 &&
- (current_offset + 1 - current_line_start) % 5 == 0) ||
- (bits_per_pixel_ == 12 &&
- (current_offset + 1 - current_line_start) % 3 == 0)) {
- if (buffer[current_offset++] != REG_SKIP)
- return BAD_DUMMY;
- }
-
- int data_byte = buffer[current_offset++];
-
- if (tag == LINE_END_TAG) {
- if (data_byte != LINE_END_TAG)
- return BAD_LINE_END;
-
- if (num_lines_ && ++current_line == num_lines_)
- return MISSING_REGS;
-
- if (line_length_bytes_) {
- current_offset = current_line_start + line_length_bytes_;
-
- /* Require whole line to be in the buffer (if buffer size set). */
- if (buffer.size() &&
- current_offset + line_length_bytes_ > buffer.size())
- return MISSING_REGS;
-
- if (buffer[current_offset] != LINE_START)
- return NO_LINE_START;
- } else {
- /* allow a zero line length to mean "hunt for the next line" */
- while (current_offset < buffer.size() &&
- buffer[current_offset] != LINE_START)
- current_offset++;
-
- if (current_offset == buffer.size())
- return NO_LINE_START;
- }
-
- /* inc current_offset to after LINE_START */
- current_line_start = current_offset++;
- } else {
- if (tag == REG_HI_BITS)
- reg_num = (reg_num & 0xff) | (data_byte << 8);
- else if (tag == REG_LOW_BITS)
- reg_num = (reg_num & 0xff00) | data_byte;
- else if (tag == REG_SKIP)
- reg_num++;
- else if (tag == REG_VALUE) {
- auto reg = offsets_.find(reg_num);
-
- if (reg != offsets_.end()) {
- offsets_[reg_num] = current_offset - 1;
-
- if (++regs_done == offsets_.size())
- return PARSE_OK;
- }
- reg_num++;
- } else
- return ILLEGAL_TAG;
- }
- }
-}
diff --git a/src/ipa/raspberrypi/meson.build b/src/ipa/raspberrypi/meson.build
deleted file mode 100644
index 32897e07..00000000
--- a/src/ipa/raspberrypi/meson.build
+++ /dev/null
@@ -1,66 +0,0 @@
-# SPDX-License-Identifier: CC0-1.0
-
-ipa_name = 'ipa_rpi'
-
-rpi_ipa_deps = [
- libcamera_private,
- dependency('boost'),
- libatomic,
-]
-
-rpi_ipa_includes = [
- ipa_includes,
- libipa_includes,
- include_directories('controller')
-]
-
-rpi_ipa_sources = files([
- 'raspberrypi.cpp',
- 'md_parser_smia.cpp',
- 'cam_helper.cpp',
- 'cam_helper_ov5647.cpp',
- 'cam_helper_imx219.cpp',
- 'cam_helper_imx290.cpp',
- 'cam_helper_imx296.cpp',
- 'cam_helper_imx477.cpp',
- 'cam_helper_imx519.cpp',
- 'cam_helper_ov9281.cpp',
- 'controller/controller.cpp',
- 'controller/histogram.cpp',
- 'controller/algorithm.cpp',
- 'controller/rpi/alsc.cpp',
- 'controller/rpi/awb.cpp',
- 'controller/rpi/sharpen.cpp',
- 'controller/rpi/black_level.cpp',
- 'controller/rpi/focus.cpp',
- 'controller/rpi/geq.cpp',
- 'controller/rpi/noise.cpp',
- 'controller/rpi/lux.cpp',
- 'controller/rpi/agc.cpp',
- 'controller/rpi/dpc.cpp',
- 'controller/rpi/ccm.cpp',
- 'controller/rpi/contrast.cpp',
- 'controller/rpi/sdn.cpp',
- 'controller/pwl.cpp',
- 'controller/device_status.cpp',
-])
-
-mod = shared_module(ipa_name,
- [rpi_ipa_sources, libcamera_generated_ipa_headers],
- name_prefix : '',
- include_directories : rpi_ipa_includes,
- dependencies : rpi_ipa_deps,
- link_with : libipa,
- install : true,
- install_dir : ipa_install_dir)
-
-if ipa_sign_module
- custom_target(ipa_name + '.so.sign',
- input : mod,
- output : ipa_name + '.so.sign',
- command : [ipa_sign, ipa_priv_key, '@INPUT@', '@OUTPUT@'],
- install : false,
- build_by_default : true)
-endif
-
-subdir('data')
diff --git a/src/ipa/raspberrypi/raspberrypi.cpp b/src/ipa/raspberrypi/raspberrypi.cpp
deleted file mode 100644
index f8d37b87..00000000
--- a/src/ipa/raspberrypi/raspberrypi.cpp
+++ /dev/null
@@ -1,1460 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019-2021, Raspberry Pi (Trading) Ltd.
- *
- * rpi.cpp - Raspberry Pi Image Processing Algorithms
- */
-
-#include <algorithm>
-#include <array>
-#include <fcntl.h>
-#include <math.h>
-#include <stdint.h>
-#include <string.h>
-#include <sys/mman.h>
-
-#include <linux/bcm2835-isp.h>
-
-#include <libcamera/base/log.h>
-#include <libcamera/base/shared_fd.h>
-#include <libcamera/base/span.h>
-
-#include <libcamera/control_ids.h>
-#include <libcamera/controls.h>
-#include <libcamera/framebuffer.h>
-#include <libcamera/ipa/ipa_interface.h>
-#include <libcamera/ipa/ipa_module_info.h>
-#include <libcamera/ipa/raspberrypi_ipa_interface.h>
-#include <libcamera/request.h>
-
-#include "libcamera/internal/mapped_framebuffer.h"
-
-#include "agc_algorithm.hpp"
-#include "agc_status.h"
-#include "alsc_status.h"
-#include "awb_algorithm.hpp"
-#include "awb_status.h"
-#include "black_level_status.h"
-#include "cam_helper.hpp"
-#include "ccm_algorithm.hpp"
-#include "ccm_status.h"
-#include "contrast_algorithm.hpp"
-#include "contrast_status.h"
-#include "controller.hpp"
-#include "denoise_algorithm.hpp"
-#include "denoise_status.h"
-#include "dpc_status.h"
-#include "focus_status.h"
-#include "geq_status.h"
-#include "lux_status.h"
-#include "metadata.hpp"
-#include "noise_status.h"
-#include "sharpen_algorithm.hpp"
-#include "sharpen_status.h"
-
-namespace libcamera {
-
-using namespace std::literals::chrono_literals;
-using utils::Duration;
-
-/* Configure the sensor with these values initially. */
-constexpr double defaultAnalogueGain = 1.0;
-constexpr Duration defaultExposureTime = 20.0ms;
-constexpr Duration defaultMinFrameDuration = 1.0s / 30.0;
-constexpr Duration defaultMaxFrameDuration = 250.0s;
-
-/*
- * Determine the minimum allowable inter-frame duration to run the controller
- * algorithms. If the pipeline handler provides frames at a rate higher than this,
- * we rate-limit the controller Prepare() and Process() calls to lower than or
- * equal to this rate.
- */
-constexpr Duration controllerMinFrameDuration = 1.0s / 30.0;
-
-/* List of controls handled by the Raspberry Pi IPA */
-static const ControlInfoMap::Map ipaControls{
- { &controls::AeEnable, ControlInfo(false, true) },
- { &controls::ExposureTime, ControlInfo(0, 66666) },
- { &controls::AnalogueGain, ControlInfo(1.0f, 16.0f) },
- { &controls::AeMeteringMode, ControlInfo(controls::AeMeteringModeValues) },
- { &controls::AeConstraintMode, ControlInfo(controls::AeConstraintModeValues) },
- { &controls::AeExposureMode, ControlInfo(controls::AeExposureModeValues) },
- { &controls::ExposureValue, ControlInfo(-8.0f, 8.0f, 0.0f) },
- { &controls::AwbEnable, ControlInfo(false, true) },
- { &controls::ColourGains, ControlInfo(0.0f, 32.0f) },
- { &controls::AwbMode, ControlInfo(controls::AwbModeValues) },
- { &controls::Brightness, ControlInfo(-1.0f, 1.0f, 0.0f) },
- { &controls::Contrast, ControlInfo(0.0f, 32.0f, 1.0f) },
- { &controls::Saturation, ControlInfo(0.0f, 32.0f, 1.0f) },
- { &controls::Sharpness, ControlInfo(0.0f, 16.0f, 1.0f) },
- { &controls::ColourCorrectionMatrix, ControlInfo(-16.0f, 16.0f) },
- { &controls::ScalerCrop, ControlInfo(Rectangle{}, Rectangle(65535, 65535, 65535, 65535), Rectangle{}) },
- { &controls::FrameDurationLimits, ControlInfo(INT64_C(33333), INT64_C(120000)) },
- { &controls::draft::NoiseReductionMode, ControlInfo(controls::draft::NoiseReductionModeValues) }
-};
-
-LOG_DEFINE_CATEGORY(IPARPI)
-
-namespace ipa::RPi {
-
-class IPARPi : public IPARPiInterface
-{
-public:
- IPARPi()
- : controller_(), frameCount_(0), checkCount_(0), mistrustCount_(0),
- lastRunTimestamp_(0), lsTable_(nullptr), firstStart_(true)
- {
- }
-
- ~IPARPi()
- {
- if (lsTable_)
- munmap(lsTable_, MaxLsGridSize);
- }
-
- int init(const IPASettings &settings, IPAInitResult *result) override;
- void start(const ControlList &controls, StartConfig *startConfig) override;
- void stop() override {}
-
- int configure(const IPACameraSensorInfo &sensorInfo,
- const std::map<unsigned int, IPAStream> &streamConfig,
- const std::map<unsigned int, ControlInfoMap> &entityControls,
- const IPAConfig &data,
- ControlList *controls, IPAConfigResult *result) override;
- void mapBuffers(const std::vector<IPABuffer> &buffers) override;
- void unmapBuffers(const std::vector<unsigned int> &ids) override;
- void signalStatReady(const uint32_t bufferId) override;
- void signalQueueRequest(const ControlList &controls) override;
- void signalIspPrepare(const ISPConfig &data) override;
-
-private:
- void setMode(const IPACameraSensorInfo &sensorInfo);
- bool validateSensorControls();
- bool validateIspControls();
- void queueRequest(const ControlList &controls);
- void returnEmbeddedBuffer(unsigned int bufferId);
- void prepareISP(const ISPConfig &data);
- void reportMetadata();
- void fillDeviceStatus(const ControlList &sensorControls);
- void processStats(unsigned int bufferId);
- void applyFrameDurations(Duration minFrameDuration, Duration maxFrameDuration);
- void applyAGC(const struct AgcStatus *agcStatus, ControlList &ctrls);
- void applyAWB(const struct AwbStatus *awbStatus, ControlList &ctrls);
- void applyDG(const struct AgcStatus *dgStatus, ControlList &ctrls);
- void applyCCM(const struct CcmStatus *ccmStatus, ControlList &ctrls);
- void applyBlackLevel(const struct BlackLevelStatus *blackLevelStatus, ControlList &ctrls);
- void applyGamma(const struct ContrastStatus *contrastStatus, ControlList &ctrls);
- void applyGEQ(const struct GeqStatus *geqStatus, ControlList &ctrls);
- void applyDenoise(const struct DenoiseStatus *denoiseStatus, ControlList &ctrls);
- void applySharpen(const struct SharpenStatus *sharpenStatus, ControlList &ctrls);
- void applyDPC(const struct DpcStatus *dpcStatus, ControlList &ctrls);
- void applyLS(const struct AlscStatus *lsStatus, ControlList &ctrls);
- void resampleTable(uint16_t dest[], double const src[12][16], int destW, int destH);
-
- std::map<unsigned int, MappedFrameBuffer> buffers_;
-
- ControlInfoMap sensorCtrls_;
- ControlInfoMap ispCtrls_;
- ControlList libcameraMetadata_;
-
- /* Camera sensor params. */
- CameraMode mode_;
-
- /* Raspberry Pi controller specific defines. */
- std::unique_ptr<RPiController::CamHelper> helper_;
- RPiController::Controller controller_;
- RPiController::Metadata rpiMetadata_;
-
- /*
- * We count frames to decide if the frame must be hidden (e.g. from
- * display) or mistrusted (i.e. not given to the control algos).
- */
- uint64_t frameCount_;
-
- /* For checking the sequencing of Prepare/Process calls. */
- uint64_t checkCount_;
-
- /* How many frames we should avoid running control algos on. */
- unsigned int mistrustCount_;
-
- /* Number of frames that need to be dropped on startup. */
- unsigned int dropFrameCount_;
-
- /* Frame timestamp for the last run of the controller. */
- uint64_t lastRunTimestamp_;
-
- /* Do we run a Controller::process() for this frame? */
- bool processPending_;
-
- /* LS table allocation passed in from the pipeline handler. */
- SharedFD lsTableHandle_;
- void *lsTable_;
-
- /* Distinguish the first camera start from others. */
- bool firstStart_;
-
- /* Frame duration (1/fps) limits. */
- Duration minFrameDuration_;
- Duration maxFrameDuration_;
-
- /* Maximum gain code for the sensor. */
- uint32_t maxSensorGainCode_;
-};
-
-int IPARPi::init(const IPASettings &settings, IPAInitResult *result)
-{
- /*
- * Load the "helper" for this sensor. This tells us all the device specific stuff
- * that the kernel driver doesn't. We only do this the first time; we don't need
- * to re-parse the metadata after a simple mode-switch for no reason.
- */
- helper_ = std::unique_ptr<RPiController::CamHelper>(RPiController::CamHelper::Create(settings.sensorModel));
- if (!helper_) {
- LOG(IPARPI, Error) << "Could not create camera helper for "
- << settings.sensorModel;
- return -EINVAL;
- }
-
- /*
- * Pass out the sensor config to the pipeline handler in order
- * to setup the staggered writer class.
- */
- int gainDelay, exposureDelay, vblankDelay, sensorMetadata;
- helper_->GetDelays(exposureDelay, gainDelay, vblankDelay);
- sensorMetadata = helper_->SensorEmbeddedDataPresent();
-
- result->sensorConfig.gainDelay = gainDelay;
- result->sensorConfig.exposureDelay = exposureDelay;
- result->sensorConfig.vblankDelay = vblankDelay;
- result->sensorConfig.sensorMetadata = sensorMetadata;
-
- /* Load the tuning file for this sensor. */
- controller_.Read(settings.configurationFile.c_str());
- controller_.Initialise();
-
- /* Return the controls handled by the IPA */
- ControlInfoMap::Map ctrlMap = ipaControls;
- result->controlInfo = ControlInfoMap(std::move(ctrlMap), controls::controls);
-
- return 0;
-}
-
-void IPARPi::start(const ControlList &controls, StartConfig *startConfig)
-{
- RPiController::Metadata metadata;
-
- ASSERT(startConfig);
- if (!controls.empty()) {
- /* We have been given some controls to action before start. */
- queueRequest(controls);
- }
-
- controller_.SwitchMode(mode_, &metadata);
-
- /* SwitchMode may supply updated exposure/gain values to use. */
- AgcStatus agcStatus;
- agcStatus.shutter_time = 0.0s;
- agcStatus.analogue_gain = 0.0;
-
- metadata.Get("agc.status", agcStatus);
- if (agcStatus.shutter_time && agcStatus.analogue_gain) {
- ControlList ctrls(sensorCtrls_);
- applyAGC(&agcStatus, ctrls);
- startConfig->controls = std::move(ctrls);
- }
-
- /*
- * Initialise frame counts, and decide how many frames must be hidden or
- * "mistrusted", which depends on whether this is a startup from cold,
- * or merely a mode switch in a running system.
- */
- frameCount_ = 0;
- checkCount_ = 0;
- if (firstStart_) {
- dropFrameCount_ = helper_->HideFramesStartup();
- mistrustCount_ = helper_->MistrustFramesStartup();
-
- /*
- * Query the AGC/AWB for how many frames they may take to
- * converge sufficiently. Where these numbers are non-zero
- * we must allow for the frames with bad statistics
- * (mistrustCount_) that they won't see. But if zero (i.e.
- * no convergence necessary), no frames need to be dropped.
- */
- unsigned int agcConvergenceFrames = 0;
- RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>(
- controller_.GetAlgorithm("agc"));
- if (agc) {
- agcConvergenceFrames = agc->GetConvergenceFrames();
- if (agcConvergenceFrames)
- agcConvergenceFrames += mistrustCount_;
- }
-
- unsigned int awbConvergenceFrames = 0;
- RPiController::AwbAlgorithm *awb = dynamic_cast<RPiController::AwbAlgorithm *>(
- controller_.GetAlgorithm("awb"));
- if (awb) {
- awbConvergenceFrames = awb->GetConvergenceFrames();
- if (awbConvergenceFrames)
- awbConvergenceFrames += mistrustCount_;
- }
-
- dropFrameCount_ = std::max({ dropFrameCount_, agcConvergenceFrames, awbConvergenceFrames });
- LOG(IPARPI, Debug) << "Drop " << dropFrameCount_ << " frames on startup";
- } else {
- dropFrameCount_ = helper_->HideFramesModeSwitch();
- mistrustCount_ = helper_->MistrustFramesModeSwitch();
- }
-
- startConfig->dropFrameCount = dropFrameCount_;
- const Duration maxSensorFrameDuration = mode_.max_frame_length * mode_.line_length;
- startConfig->maxSensorFrameLengthMs = maxSensorFrameDuration.get<std::milli>();
-
- firstStart_ = false;
- lastRunTimestamp_ = 0;
-}
-
-void IPARPi::setMode(const IPACameraSensorInfo &sensorInfo)
-{
- mode_.bitdepth = sensorInfo.bitsPerPixel;
- mode_.width = sensorInfo.outputSize.width;
- mode_.height = sensorInfo.outputSize.height;
- mode_.sensor_width = sensorInfo.activeAreaSize.width;
- mode_.sensor_height = sensorInfo.activeAreaSize.height;
- mode_.crop_x = sensorInfo.analogCrop.x;
- mode_.crop_y = sensorInfo.analogCrop.y;
-
- /*
- * Calculate scaling parameters. The scale_[xy] factors are determined
- * by the ratio between the crop rectangle size and the output size.
- */
- mode_.scale_x = sensorInfo.analogCrop.width / sensorInfo.outputSize.width;
- mode_.scale_y = sensorInfo.analogCrop.height / sensorInfo.outputSize.height;
-
- /*
- * We're not told by the pipeline handler how scaling is split between
- * binning and digital scaling. For now, as a heuristic, assume that
- * downscaling up to 2 is achieved through binning, and that any
- * additional scaling is achieved through digital scaling.
- *
- * \todo Get the pipeline handler to provide the full data
- */
- mode_.bin_x = std::min(2, static_cast<int>(mode_.scale_x));
- mode_.bin_y = std::min(2, static_cast<int>(mode_.scale_y));
-
- /* The noise factor is the square root of the total binning factor. */
- mode_.noise_factor = sqrt(mode_.bin_x * mode_.bin_y);
-
- /*
- * Calculate the line length as the ratio between the line length in
- * pixels and the pixel rate.
- */
- mode_.line_length = sensorInfo.lineLength * (1.0s / sensorInfo.pixelRate);
-
- /*
- * Set the frame length limits for the mode to ensure exposure and
- * framerate calculations are clipped appropriately.
- */
- mode_.min_frame_length = sensorInfo.minFrameLength;
- mode_.max_frame_length = sensorInfo.maxFrameLength;
-
- /*
- * Some sensors may have different sensitivities in different modes;
- * the CamHelper will know the correct value.
- */
- mode_.sensitivity = helper_->GetModeSensitivity(mode_);
-}
-
-int IPARPi::configure(const IPACameraSensorInfo &sensorInfo,
- [[maybe_unused]] const std::map<unsigned int, IPAStream> &streamConfig,
- const std::map<unsigned int, ControlInfoMap> &entityControls,
- const IPAConfig &ipaConfig,
- ControlList *controls, IPAConfigResult *result)
-{
- if (entityControls.size() != 2) {
- LOG(IPARPI, Error) << "No ISP or sensor controls found.";
- return -1;
- }
-
- sensorCtrls_ = entityControls.at(0);
- ispCtrls_ = entityControls.at(1);
-
- if (!validateSensorControls()) {
- LOG(IPARPI, Error) << "Sensor control validation failed.";
- return -1;
- }
-
- if (!validateIspControls()) {
- LOG(IPARPI, Error) << "ISP control validation failed.";
- return -1;
- }
-
- maxSensorGainCode_ = sensorCtrls_.at(V4L2_CID_ANALOGUE_GAIN).max().get<int32_t>();
-
- /* Setup a metadata ControlList to output metadata. */
- libcameraMetadata_ = ControlList(controls::controls);
-
- /* Re-assemble camera mode using the sensor info. */
- setMode(sensorInfo);
-
- mode_.transform = static_cast<libcamera::Transform>(ipaConfig.transform);
-
- /* Store the lens shading table pointer and handle if available. */
- if (ipaConfig.lsTableHandle.isValid()) {
- /* Remove any previous table, if there was one. */
- if (lsTable_) {
- munmap(lsTable_, MaxLsGridSize);
- lsTable_ = nullptr;
- }
-
- /* Map the LS table buffer into user space. */
- lsTableHandle_ = std::move(ipaConfig.lsTableHandle);
- if (lsTableHandle_.isValid()) {
- lsTable_ = mmap(nullptr, MaxLsGridSize, PROT_READ | PROT_WRITE,
- MAP_SHARED, lsTableHandle_.get(), 0);
-
- if (lsTable_ == MAP_FAILED) {
- LOG(IPARPI, Error) << "dmaHeap mmap failure for LS table.";
- lsTable_ = nullptr;
- }
- }
- }
-
- /* Pass the camera mode to the CamHelper to setup algorithms. */
- helper_->SetCameraMode(mode_);
-
- /*
- * Initialise this ControlList correctly, even if empty, in case the IPA is
- * running in isolation mode (passing the ControlList through the IPC layer).
- */
- ControlList ctrls(sensorCtrls_);
-
- /* The pipeline handler passes out the mode's sensitivity. */
- result->modeSensitivity = mode_.sensitivity;
-
- if (firstStart_) {
- /* Supply initial values for frame durations. */
- applyFrameDurations(defaultMinFrameDuration, defaultMaxFrameDuration);
-
- /* Supply initial values for gain and exposure. */
- AgcStatus agcStatus;
- agcStatus.shutter_time = defaultExposureTime;
- agcStatus.analogue_gain = defaultAnalogueGain;
- applyAGC(&agcStatus, ctrls);
- }
-
- ASSERT(controls);
- *controls = std::move(ctrls);
-
- /*
- * Apply the correct limits to the exposure, gain and frame duration controls
- * based on the current sensor mode.
- */
- ControlInfoMap::Map ctrlMap = ipaControls;
- const Duration minSensorFrameDuration = mode_.min_frame_length * mode_.line_length;
- const Duration maxSensorFrameDuration = mode_.max_frame_length * mode_.line_length;
- ctrlMap[&controls::FrameDurationLimits] =
- ControlInfo(static_cast<int64_t>(minSensorFrameDuration.get<std::micro>()),
- static_cast<int64_t>(maxSensorFrameDuration.get<std::micro>()));
-
- ctrlMap[&controls::AnalogueGain] =
- ControlInfo(1.0f, static_cast<float>(helper_->Gain(maxSensorGainCode_)));
-
- /*
- * Calculate the max exposure limit from the frame duration limit as V4L2
- * will limit the maximum control value based on the current VBLANK value.
- */
- Duration maxShutter = Duration::max();
- helper_->GetVBlanking(maxShutter, minSensorFrameDuration, maxSensorFrameDuration);
- const uint32_t exposureMin = sensorCtrls_.at(V4L2_CID_EXPOSURE).min().get<int32_t>();
-
- ctrlMap[&controls::ExposureTime] =
- ControlInfo(static_cast<int32_t>(helper_->Exposure(exposureMin).get<std::micro>()),
- static_cast<int32_t>(maxShutter.get<std::micro>()));
-
- result->controlInfo = ControlInfoMap(std::move(ctrlMap), controls::controls);
- return 0;
-}
-
-void IPARPi::mapBuffers(const std::vector<IPABuffer> &buffers)
-{
- for (const IPABuffer &buffer : buffers) {
- const FrameBuffer fb(buffer.planes);
- buffers_.emplace(buffer.id,
- MappedFrameBuffer(&fb, MappedFrameBuffer::MapFlag::ReadWrite));
- }
-}
-
-void IPARPi::unmapBuffers(const std::vector<unsigned int> &ids)
-{
- for (unsigned int id : ids) {
- auto it = buffers_.find(id);
- if (it == buffers_.end())
- continue;
-
- buffers_.erase(id);
- }
-}
-
-void IPARPi::signalStatReady(uint32_t bufferId)
-{
- if (++checkCount_ != frameCount_) /* assert here? */
- LOG(IPARPI, Error) << "WARNING: Prepare/Process mismatch!!!";
- if (processPending_ && frameCount_ > mistrustCount_)
- processStats(bufferId);
-
- reportMetadata();
-
- statsMetadataComplete.emit(bufferId & MaskID, libcameraMetadata_);
-}
-
-void IPARPi::signalQueueRequest(const ControlList &controls)
-{
- queueRequest(controls);
-}
-
-void IPARPi::signalIspPrepare(const ISPConfig &data)
-{
- /*
- * At start-up, or after a mode-switch, we may want to
- * avoid running the control algos for a few frames in case
- * they are "unreliable".
- */
- prepareISP(data);
- frameCount_++;
-
- /* Ready to push the input buffer into the ISP. */
- runIsp.emit(data.bayerBufferId & MaskID);
-}
-
-void IPARPi::reportMetadata()
-{
- std::unique_lock<RPiController::Metadata> lock(rpiMetadata_);
-
- /*
- * Certain information about the current frame and how it will be
- * processed can be extracted and placed into the libcamera metadata
- * buffer, where an application could query it.
- */
- DeviceStatus *deviceStatus = rpiMetadata_.GetLocked<DeviceStatus>("device.status");
- if (deviceStatus) {
- libcameraMetadata_.set(controls::ExposureTime,
- deviceStatus->shutter_speed.get<std::micro>());
- libcameraMetadata_.set(controls::AnalogueGain, deviceStatus->analogue_gain);
- libcameraMetadata_.set(controls::FrameDuration,
- helper_->Exposure(deviceStatus->frame_length).get<std::micro>());
- if (deviceStatus->sensor_temperature)
- libcameraMetadata_.set(controls::SensorTemperature, *deviceStatus->sensor_temperature);
- }
-
- AgcStatus *agcStatus = rpiMetadata_.GetLocked<AgcStatus>("agc.status");
- if (agcStatus) {
- libcameraMetadata_.set(controls::AeLocked, agcStatus->locked);
- libcameraMetadata_.set(controls::DigitalGain, agcStatus->digital_gain);
- }
-
- LuxStatus *luxStatus = rpiMetadata_.GetLocked<LuxStatus>("lux.status");
- if (luxStatus)
- libcameraMetadata_.set(controls::Lux, luxStatus->lux);
-
- AwbStatus *awbStatus = rpiMetadata_.GetLocked<AwbStatus>("awb.status");
- if (awbStatus) {
- libcameraMetadata_.set(controls::ColourGains, { static_cast<float>(awbStatus->gain_r),
- static_cast<float>(awbStatus->gain_b) });
- libcameraMetadata_.set(controls::ColourTemperature, awbStatus->temperature_K);
- }
-
- BlackLevelStatus *blackLevelStatus = rpiMetadata_.GetLocked<BlackLevelStatus>("black_level.status");
- if (blackLevelStatus)
- libcameraMetadata_.set(controls::SensorBlackLevels,
- { static_cast<int32_t>(blackLevelStatus->black_level_r),
- static_cast<int32_t>(blackLevelStatus->black_level_g),
- static_cast<int32_t>(blackLevelStatus->black_level_g),
- static_cast<int32_t>(blackLevelStatus->black_level_b) });
-
- FocusStatus *focusStatus = rpiMetadata_.GetLocked<FocusStatus>("focus.status");
- if (focusStatus && focusStatus->num == 12) {
- /*
- * We get a 4x3 grid of regions by default. Calculate the average
- * FoM over the central two positions to give an overall scene FoM.
- * This can change later if it is not deemed suitable.
- */
- int32_t focusFoM = (focusStatus->focus_measures[5] + focusStatus->focus_measures[6]) / 2;
- libcameraMetadata_.set(controls::FocusFoM, focusFoM);
- }
-
- CcmStatus *ccmStatus = rpiMetadata_.GetLocked<CcmStatus>("ccm.status");
- if (ccmStatus) {
- float m[9];
- for (unsigned int i = 0; i < 9; i++)
- m[i] = ccmStatus->matrix[i];
- libcameraMetadata_.set(controls::ColourCorrectionMatrix, m);
- }
-}
-
-bool IPARPi::validateSensorControls()
-{
- static const uint32_t ctrls[] = {
- V4L2_CID_ANALOGUE_GAIN,
- V4L2_CID_EXPOSURE,
- V4L2_CID_VBLANK,
- };
-
- for (auto c : ctrls) {
- if (sensorCtrls_.find(c) == sensorCtrls_.end()) {
- LOG(IPARPI, Error) << "Unable to find sensor control "
- << utils::hex(c);
- return false;
- }
- }
-
- return true;
-}
-
-bool IPARPi::validateIspControls()
-{
- static const uint32_t ctrls[] = {
- V4L2_CID_RED_BALANCE,
- V4L2_CID_BLUE_BALANCE,
- V4L2_CID_DIGITAL_GAIN,
- V4L2_CID_USER_BCM2835_ISP_CC_MATRIX,
- V4L2_CID_USER_BCM2835_ISP_GAMMA,
- V4L2_CID_USER_BCM2835_ISP_BLACK_LEVEL,
- V4L2_CID_USER_BCM2835_ISP_GEQ,
- V4L2_CID_USER_BCM2835_ISP_DENOISE,
- V4L2_CID_USER_BCM2835_ISP_SHARPEN,
- V4L2_CID_USER_BCM2835_ISP_DPC,
- V4L2_CID_USER_BCM2835_ISP_LENS_SHADING,
- V4L2_CID_USER_BCM2835_ISP_CDN,
- };
-
- for (auto c : ctrls) {
- if (ispCtrls_.find(c) == ispCtrls_.end()) {
- LOG(IPARPI, Error) << "Unable to find ISP control "
- << utils::hex(c);
- return false;
- }
- }
-
- return true;
-}
-
-/*
- * Converting between enums (used in the libcamera API) and the names that
- * we use to identify different modes. Unfortunately, the conversion tables
- * must be kept up-to-date by hand.
- */
-static const std::map<int32_t, std::string> MeteringModeTable = {
- { controls::MeteringCentreWeighted, "centre-weighted" },
- { controls::MeteringSpot, "spot" },
- { controls::MeteringMatrix, "matrix" },
- { controls::MeteringCustom, "custom" },
-};
-
-static const std::map<int32_t, std::string> ConstraintModeTable = {
- { controls::ConstraintNormal, "normal" },
- { controls::ConstraintHighlight, "highlight" },
- { controls::ConstraintCustom, "custom" },
-};
-
-static const std::map<int32_t, std::string> ExposureModeTable = {
- { controls::ExposureNormal, "normal" },
- { controls::ExposureShort, "short" },
- { controls::ExposureLong, "long" },
- { controls::ExposureCustom, "custom" },
-};
-
-static const std::map<int32_t, std::string> AwbModeTable = {
- { controls::AwbAuto, "auto" },
- { controls::AwbIncandescent, "incandescent" },
- { controls::AwbTungsten, "tungsten" },
- { controls::AwbFluorescent, "fluorescent" },
- { controls::AwbIndoor, "indoor" },
- { controls::AwbDaylight, "daylight" },
- { controls::AwbCloudy, "cloudy" },
- { controls::AwbCustom, "custom" },
-};
-
-static const std::map<int32_t, RPiController::DenoiseMode> DenoiseModeTable = {
- { controls::draft::NoiseReductionModeOff, RPiController::DenoiseMode::Off },
- { controls::draft::NoiseReductionModeFast, RPiController::DenoiseMode::ColourFast },
- { controls::draft::NoiseReductionModeHighQuality, RPiController::DenoiseMode::ColourHighQuality },
- { controls::draft::NoiseReductionModeMinimal, RPiController::DenoiseMode::ColourOff },
- { controls::draft::NoiseReductionModeZSL, RPiController::DenoiseMode::ColourHighQuality },
-};
-
-void IPARPi::queueRequest(const ControlList &controls)
-{
- /* Clear the return metadata buffer. */
- libcameraMetadata_.clear();
-
- for (auto const &ctrl : controls) {
- LOG(IPARPI, Debug) << "Request ctrl: "
- << controls::controls.at(ctrl.first)->name()
- << " = " << ctrl.second.toString();
-
- switch (ctrl.first) {
- case controls::AE_ENABLE: {
- RPiController::Algorithm *agc = controller_.GetAlgorithm("agc");
- if (!agc) {
- LOG(IPARPI, Warning)
- << "Could not set AE_ENABLE - no AGC algorithm";
- break;
- }
-
- if (ctrl.second.get<bool>() == false)
- agc->Pause();
- else
- agc->Resume();
-
- libcameraMetadata_.set(controls::AeEnable, ctrl.second.get<bool>());
- break;
- }
-
- case controls::EXPOSURE_TIME: {
- RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>(
- controller_.GetAlgorithm("agc"));
- if (!agc) {
- LOG(IPARPI, Warning)
- << "Could not set EXPOSURE_TIME - no AGC algorithm";
- break;
- }
-
- /* The control provides units of microseconds. */
- agc->SetFixedShutter(ctrl.second.get<int32_t>() * 1.0us);
-
- libcameraMetadata_.set(controls::ExposureTime, ctrl.second.get<int32_t>());
- break;
- }
-
- case controls::ANALOGUE_GAIN: {
- RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>(
- controller_.GetAlgorithm("agc"));
- if (!agc) {
- LOG(IPARPI, Warning)
- << "Could not set ANALOGUE_GAIN - no AGC algorithm";
- break;
- }
-
- agc->SetFixedAnalogueGain(ctrl.second.get<float>());
-
- libcameraMetadata_.set(controls::AnalogueGain,
- ctrl.second.get<float>());
- break;
- }
-
- case controls::AE_METERING_MODE: {
- RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>(
- controller_.GetAlgorithm("agc"));
- if (!agc) {
- LOG(IPARPI, Warning)
- << "Could not set AE_METERING_MODE - no AGC algorithm";
- break;
- }
-
- int32_t idx = ctrl.second.get<int32_t>();
- if (MeteringModeTable.count(idx)) {
- agc->SetMeteringMode(MeteringModeTable.at(idx));
- libcameraMetadata_.set(controls::AeMeteringMode, idx);
- } else {
- LOG(IPARPI, Error) << "Metering mode " << idx
- << " not recognised";
- }
- break;
- }
-
- case controls::AE_CONSTRAINT_MODE: {
- RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>(
- controller_.GetAlgorithm("agc"));
- if (!agc) {
- LOG(IPARPI, Warning)
- << "Could not set AE_CONSTRAINT_MODE - no AGC algorithm";
- break;
- }
-
- int32_t idx = ctrl.second.get<int32_t>();
- if (ConstraintModeTable.count(idx)) {
- agc->SetConstraintMode(ConstraintModeTable.at(idx));
- libcameraMetadata_.set(controls::AeConstraintMode, idx);
- } else {
- LOG(IPARPI, Error) << "Constraint mode " << idx
- << " not recognised";
- }
- break;
- }
-
- case controls::AE_EXPOSURE_MODE: {
- RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>(
- controller_.GetAlgorithm("agc"));
- if (!agc) {
- LOG(IPARPI, Warning)
- << "Could not set AE_EXPOSURE_MODE - no AGC algorithm";
- break;
- }
-
- int32_t idx = ctrl.second.get<int32_t>();
- if (ExposureModeTable.count(idx)) {
- agc->SetExposureMode(ExposureModeTable.at(idx));
- libcameraMetadata_.set(controls::AeExposureMode, idx);
- } else {
- LOG(IPARPI, Error) << "Exposure mode " << idx
- << " not recognised";
- }
- break;
- }
-
- case controls::EXPOSURE_VALUE: {
- RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>(
- controller_.GetAlgorithm("agc"));
- if (!agc) {
- LOG(IPARPI, Warning)
- << "Could not set EXPOSURE_VALUE - no AGC algorithm";
- break;
- }
-
- /*
- * The SetEv() function takes in a direct exposure multiplier.
- * So convert to 2^EV
- */
- double ev = pow(2.0, ctrl.second.get<float>());
- agc->SetEv(ev);
- libcameraMetadata_.set(controls::ExposureValue,
- ctrl.second.get<float>());
- break;
- }
-
- case controls::AWB_ENABLE: {
- RPiController::Algorithm *awb = controller_.GetAlgorithm("awb");
- if (!awb) {
- LOG(IPARPI, Warning)
- << "Could not set AWB_ENABLE - no AWB algorithm";
- break;
- }
-
- if (ctrl.second.get<bool>() == false)
- awb->Pause();
- else
- awb->Resume();
-
- libcameraMetadata_.set(controls::AwbEnable,
- ctrl.second.get<bool>());
- break;
- }
-
- case controls::AWB_MODE: {
- RPiController::AwbAlgorithm *awb = dynamic_cast<RPiController::AwbAlgorithm *>(
- controller_.GetAlgorithm("awb"));
- if (!awb) {
- LOG(IPARPI, Warning)
- << "Could not set AWB_MODE - no AWB algorithm";
- break;
- }
-
- int32_t idx = ctrl.second.get<int32_t>();
- if (AwbModeTable.count(idx)) {
- awb->SetMode(AwbModeTable.at(idx));
- libcameraMetadata_.set(controls::AwbMode, idx);
- } else {
- LOG(IPARPI, Error) << "AWB mode " << idx
- << " not recognised";
- }
- break;
- }
-
- case controls::COLOUR_GAINS: {
- auto gains = ctrl.second.get<Span<const float>>();
- RPiController::AwbAlgorithm *awb = dynamic_cast<RPiController::AwbAlgorithm *>(
- controller_.GetAlgorithm("awb"));
- if (!awb) {
- LOG(IPARPI, Warning)
- << "Could not set COLOUR_GAINS - no AWB algorithm";
- break;
- }
-
- awb->SetManualGains(gains[0], gains[1]);
- if (gains[0] != 0.0f && gains[1] != 0.0f)
- /* A gain of 0.0f will switch back to auto mode. */
- libcameraMetadata_.set(controls::ColourGains,
- { gains[0], gains[1] });
- break;
- }
-
- case controls::BRIGHTNESS: {
- RPiController::ContrastAlgorithm *contrast = dynamic_cast<RPiController::ContrastAlgorithm *>(
- controller_.GetAlgorithm("contrast"));
- if (!contrast) {
- LOG(IPARPI, Warning)
- << "Could not set BRIGHTNESS - no contrast algorithm";
- break;
- }
-
- contrast->SetBrightness(ctrl.second.get<float>() * 65536);
- libcameraMetadata_.set(controls::Brightness,
- ctrl.second.get<float>());
- break;
- }
-
- case controls::CONTRAST: {
- RPiController::ContrastAlgorithm *contrast = dynamic_cast<RPiController::ContrastAlgorithm *>(
- controller_.GetAlgorithm("contrast"));
- if (!contrast) {
- LOG(IPARPI, Warning)
- << "Could not set CONTRAST - no contrast algorithm";
- break;
- }
-
- contrast->SetContrast(ctrl.second.get<float>());
- libcameraMetadata_.set(controls::Contrast,
- ctrl.second.get<float>());
- break;
- }
-
- case controls::SATURATION: {
- RPiController::CcmAlgorithm *ccm = dynamic_cast<RPiController::CcmAlgorithm *>(
- controller_.GetAlgorithm("ccm"));
- if (!ccm) {
- LOG(IPARPI, Warning)
- << "Could not set SATURATION - no ccm algorithm";
- break;
- }
-
- ccm->SetSaturation(ctrl.second.get<float>());
- libcameraMetadata_.set(controls::Saturation,
- ctrl.second.get<float>());
- break;
- }
-
- case controls::SHARPNESS: {
- RPiController::SharpenAlgorithm *sharpen = dynamic_cast<RPiController::SharpenAlgorithm *>(
- controller_.GetAlgorithm("sharpen"));
- if (!sharpen) {
- LOG(IPARPI, Warning)
- << "Could not set SHARPNESS - no sharpen algorithm";
- break;
- }
-
- sharpen->SetStrength(ctrl.second.get<float>());
- libcameraMetadata_.set(controls::Sharpness,
- ctrl.second.get<float>());
- break;
- }
-
- case controls::SCALER_CROP: {
- /* We do nothing with this, but should avoid the warning below. */
- break;
- }
-
- case controls::FRAME_DURATION_LIMITS: {
- auto frameDurations = ctrl.second.get<Span<const int64_t>>();
- applyFrameDurations(frameDurations[0] * 1.0us, frameDurations[1] * 1.0us);
- break;
- }
-
- case controls::NOISE_REDUCTION_MODE: {
- RPiController::DenoiseAlgorithm *sdn = dynamic_cast<RPiController::DenoiseAlgorithm *>(
- controller_.GetAlgorithm("SDN"));
- if (!sdn) {
- LOG(IPARPI, Warning)
- << "Could not set NOISE_REDUCTION_MODE - no SDN algorithm";
- break;
- }
-
- int32_t idx = ctrl.second.get<int32_t>();
- auto mode = DenoiseModeTable.find(idx);
- if (mode != DenoiseModeTable.end()) {
- sdn->SetMode(mode->second);
-
- /*
- * \todo If the colour denoise is not going to run due to an
- * analysis image resolution or format mismatch, we should
- * report the status correctly in the metadata.
- */
- libcameraMetadata_.set(controls::draft::NoiseReductionMode, idx);
- } else {
- LOG(IPARPI, Error) << "Noise reduction mode " << idx
- << " not recognised";
- }
- break;
- }
-
- default:
- LOG(IPARPI, Warning)
- << "Ctrl " << controls::controls.at(ctrl.first)->name()
- << " is not handled.";
- break;
- }
- }
-}
-
-void IPARPi::returnEmbeddedBuffer(unsigned int bufferId)
-{
- embeddedComplete.emit(bufferId & MaskID);
-}
-
-void IPARPi::prepareISP(const ISPConfig &data)
-{
- int64_t frameTimestamp = data.controls.get(controls::SensorTimestamp);
- RPiController::Metadata lastMetadata;
- Span<uint8_t> embeddedBuffer;
-
- lastMetadata = std::move(rpiMetadata_);
- fillDeviceStatus(data.controls);
-
- if (data.embeddedBufferPresent) {
- /*
- * Pipeline handler has supplied us with an embedded data buffer,
- * we must pass it to the CamHelper for parsing.
- */
- auto it = buffers_.find(data.embeddedBufferId);
- ASSERT(it != buffers_.end());
- embeddedBuffer = it->second.planes()[0];
- }
-
- /*
- * This may overwrite the DeviceStatus using values from the sensor
- * metadata, and may also do additional custom processing.
- */
- helper_->Prepare(embeddedBuffer, rpiMetadata_);
-
- /* Done with embedded data now, return to pipeline handler asap. */
- if (data.embeddedBufferPresent)
- returnEmbeddedBuffer(data.embeddedBufferId);
-
- /* Allow a 10% margin on the comparison below. */
- Duration delta = (frameTimestamp - lastRunTimestamp_) * 1.0ns;
- if (lastRunTimestamp_ && frameCount_ > dropFrameCount_ &&
- delta < controllerMinFrameDuration * 0.9) {
- /*
- * Ensure we merge the previous frame's metadata with the current
- * frame. This will not overwrite exposure/gain values for the
- * current frame, or any other bits of metadata that were added
- * in helper_->Prepare().
- */
- rpiMetadata_.Merge(lastMetadata);
- processPending_ = false;
- return;
- }
-
- lastRunTimestamp_ = frameTimestamp;
- processPending_ = true;
-
- ControlList ctrls(ispCtrls_);
-
- controller_.Prepare(&rpiMetadata_);
-
- /* Lock the metadata buffer to avoid constant locks/unlocks. */
- std::unique_lock<RPiController::Metadata> lock(rpiMetadata_);
-
- AwbStatus *awbStatus = rpiMetadata_.GetLocked<AwbStatus>("awb.status");
- if (awbStatus)
- applyAWB(awbStatus, ctrls);
-
- CcmStatus *ccmStatus = rpiMetadata_.GetLocked<CcmStatus>("ccm.status");
- if (ccmStatus)
- applyCCM(ccmStatus, ctrls);
-
- AgcStatus *dgStatus = rpiMetadata_.GetLocked<AgcStatus>("agc.status");
- if (dgStatus)
- applyDG(dgStatus, ctrls);
-
- AlscStatus *lsStatus = rpiMetadata_.GetLocked<AlscStatus>("alsc.status");
- if (lsStatus)
- applyLS(lsStatus, ctrls);
-
- ContrastStatus *contrastStatus = rpiMetadata_.GetLocked<ContrastStatus>("contrast.status");
- if (contrastStatus)
- applyGamma(contrastStatus, ctrls);
-
- BlackLevelStatus *blackLevelStatus = rpiMetadata_.GetLocked<BlackLevelStatus>("black_level.status");
- if (blackLevelStatus)
- applyBlackLevel(blackLevelStatus, ctrls);
-
- GeqStatus *geqStatus = rpiMetadata_.GetLocked<GeqStatus>("geq.status");
- if (geqStatus)
- applyGEQ(geqStatus, ctrls);
-
- DenoiseStatus *denoiseStatus = rpiMetadata_.GetLocked<DenoiseStatus>("denoise.status");
- if (denoiseStatus)
- applyDenoise(denoiseStatus, ctrls);
-
- SharpenStatus *sharpenStatus = rpiMetadata_.GetLocked<SharpenStatus>("sharpen.status");
- if (sharpenStatus)
- applySharpen(sharpenStatus, ctrls);
-
- DpcStatus *dpcStatus = rpiMetadata_.GetLocked<DpcStatus>("dpc.status");
- if (dpcStatus)
- applyDPC(dpcStatus, ctrls);
-
- if (!ctrls.empty())
- setIspControls.emit(ctrls);
-}
-
-void IPARPi::fillDeviceStatus(const ControlList &sensorControls)
-{
- DeviceStatus deviceStatus = {};
-
- int32_t exposureLines = sensorControls.get(V4L2_CID_EXPOSURE).get<int32_t>();
- int32_t gainCode = sensorControls.get(V4L2_CID_ANALOGUE_GAIN).get<int32_t>();
- int32_t vblank = sensorControls.get(V4L2_CID_VBLANK).get<int32_t>();
-
- deviceStatus.shutter_speed = helper_->Exposure(exposureLines);
- deviceStatus.analogue_gain = helper_->Gain(gainCode);
- deviceStatus.frame_length = mode_.height + vblank;
-
- LOG(IPARPI, Debug) << "Metadata - " << deviceStatus;
-
- rpiMetadata_.Set("device.status", deviceStatus);
-}
-
-void IPARPi::processStats(unsigned int bufferId)
-{
- auto it = buffers_.find(bufferId);
- if (it == buffers_.end()) {
- LOG(IPARPI, Error) << "Could not find stats buffer!";
- return;
- }
-
- Span<uint8_t> mem = it->second.planes()[0];
- bcm2835_isp_stats *stats = reinterpret_cast<bcm2835_isp_stats *>(mem.data());
- RPiController::StatisticsPtr statistics = std::make_shared<bcm2835_isp_stats>(*stats);
- helper_->Process(statistics, rpiMetadata_);
- controller_.Process(statistics, &rpiMetadata_);
-
- struct AgcStatus agcStatus;
- if (rpiMetadata_.Get("agc.status", agcStatus) == 0) {
- ControlList ctrls(sensorCtrls_);
- applyAGC(&agcStatus, ctrls);
-
- setDelayedControls.emit(ctrls);
- }
-}
-
-void IPARPi::applyAWB(const struct AwbStatus *awbStatus, ControlList &ctrls)
-{
- LOG(IPARPI, Debug) << "Applying WB R: " << awbStatus->gain_r << " B: "
- << awbStatus->gain_b;
-
- ctrls.set(V4L2_CID_RED_BALANCE,
- static_cast<int32_t>(awbStatus->gain_r * 1000));
- ctrls.set(V4L2_CID_BLUE_BALANCE,
- static_cast<int32_t>(awbStatus->gain_b * 1000));
-}
-
-void IPARPi::applyFrameDurations(Duration minFrameDuration, Duration maxFrameDuration)
-{
- const Duration minSensorFrameDuration = mode_.min_frame_length * mode_.line_length;
- const Duration maxSensorFrameDuration = mode_.max_frame_length * mode_.line_length;
-
- /*
- * This will only be applied once AGC recalculations occur.
- * The values may be clamped based on the sensor mode capabilities as well.
- */
- minFrameDuration_ = minFrameDuration ? minFrameDuration : defaultMaxFrameDuration;
- maxFrameDuration_ = maxFrameDuration ? maxFrameDuration : defaultMinFrameDuration;
- minFrameDuration_ = std::clamp(minFrameDuration_,
- minSensorFrameDuration, maxSensorFrameDuration);
- maxFrameDuration_ = std::clamp(maxFrameDuration_,
- minSensorFrameDuration, maxSensorFrameDuration);
- maxFrameDuration_ = std::max(maxFrameDuration_, minFrameDuration_);
-
- /* Return the validated limits via metadata. */
- libcameraMetadata_.set(controls::FrameDurationLimits,
- { static_cast<int64_t>(minFrameDuration_.get<std::micro>()),
- static_cast<int64_t>(maxFrameDuration_.get<std::micro>()) });
-
- /*
- * Calculate the maximum exposure time possible for the AGC to use.
- * GetVBlanking() will update maxShutter with the largest exposure
- * value possible.
- */
- Duration maxShutter = Duration::max();
- helper_->GetVBlanking(maxShutter, minFrameDuration_, maxFrameDuration_);
-
- RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>(
- controller_.GetAlgorithm("agc"));
- agc->SetMaxShutter(maxShutter);
-}
-
-void IPARPi::applyAGC(const struct AgcStatus *agcStatus, ControlList &ctrls)
-{
- int32_t gainCode = helper_->GainCode(agcStatus->analogue_gain);
-
- /*
- * Ensure anything larger than the max gain code will not be passed to
- * DelayedControls. The AGC will correctly handle a lower gain returned
- * by the sensor, provided it knows the actual gain used.
- */
- gainCode = std::min<int32_t>(gainCode, maxSensorGainCode_);
-
- /* GetVBlanking might clip exposure time to the fps limits. */
- Duration exposure = agcStatus->shutter_time;
- int32_t vblanking = helper_->GetVBlanking(exposure, minFrameDuration_, maxFrameDuration_);
- int32_t exposureLines = helper_->ExposureLines(exposure);
-
- LOG(IPARPI, Debug) << "Applying AGC Exposure: " << exposure
- << " (Shutter lines: " << exposureLines << ", AGC requested "
- << agcStatus->shutter_time << ") Gain: "
- << agcStatus->analogue_gain << " (Gain Code: "
- << gainCode << ")";
-
- /*
- * Due to the behavior of V4L2, the current value of VBLANK could clip the
- * exposure time without us knowing. The next time through this function it
- * should clip the exposure correctly.
- */
- ctrls.set(V4L2_CID_VBLANK, vblanking);
- ctrls.set(V4L2_CID_EXPOSURE, exposureLines);
- ctrls.set(V4L2_CID_ANALOGUE_GAIN, gainCode);
-}
-
-void IPARPi::applyDG(const struct AgcStatus *dgStatus, ControlList &ctrls)
-{
- ctrls.set(V4L2_CID_DIGITAL_GAIN,
- static_cast<int32_t>(dgStatus->digital_gain * 1000));
-}
-
-void IPARPi::applyCCM(const struct CcmStatus *ccmStatus, ControlList &ctrls)
-{
- bcm2835_isp_custom_ccm ccm;
-
- for (int i = 0; i < 9; i++) {
- ccm.ccm.ccm[i / 3][i % 3].den = 1000;
- ccm.ccm.ccm[i / 3][i % 3].num = 1000 * ccmStatus->matrix[i];
- }
-
- ccm.enabled = 1;
- ccm.ccm.offsets[0] = ccm.ccm.offsets[1] = ccm.ccm.offsets[2] = 0;
-
- ControlValue c(Span<const uint8_t>{ reinterpret_cast<uint8_t *>(&ccm),
- sizeof(ccm) });
- ctrls.set(V4L2_CID_USER_BCM2835_ISP_CC_MATRIX, c);
-}
-
-void IPARPi::applyGamma(const struct ContrastStatus *contrastStatus, ControlList &ctrls)
-{
- struct bcm2835_isp_gamma gamma;
-
- gamma.enabled = 1;
- for (int i = 0; i < CONTRAST_NUM_POINTS; i++) {
- gamma.x[i] = contrastStatus->points[i].x;
- gamma.y[i] = contrastStatus->points[i].y;
- }
-
- ControlValue c(Span<const uint8_t>{ reinterpret_cast<uint8_t *>(&gamma),
- sizeof(gamma) });
- ctrls.set(V4L2_CID_USER_BCM2835_ISP_GAMMA, c);
-}
-
-void IPARPi::applyBlackLevel(const struct BlackLevelStatus *blackLevelStatus, ControlList &ctrls)
-{
- bcm2835_isp_black_level blackLevel;
-
- blackLevel.enabled = 1;
- blackLevel.black_level_r = blackLevelStatus->black_level_r;
- blackLevel.black_level_g = blackLevelStatus->black_level_g;
- blackLevel.black_level_b = blackLevelStatus->black_level_b;
-
- ControlValue c(Span<const uint8_t>{ reinterpret_cast<uint8_t *>(&blackLevel),
- sizeof(blackLevel) });
- ctrls.set(V4L2_CID_USER_BCM2835_ISP_BLACK_LEVEL, c);
-}
-
-void IPARPi::applyGEQ(const struct GeqStatus *geqStatus, ControlList &ctrls)
-{
- bcm2835_isp_geq geq;
-
- geq.enabled = 1;
- geq.offset = geqStatus->offset;
- geq.slope.den = 1000;
- geq.slope.num = 1000 * geqStatus->slope;
-
- ControlValue c(Span<const uint8_t>{ reinterpret_cast<uint8_t *>(&geq),
- sizeof(geq) });
- ctrls.set(V4L2_CID_USER_BCM2835_ISP_GEQ, c);
-}
-
-void IPARPi::applyDenoise(const struct DenoiseStatus *denoiseStatus, ControlList &ctrls)
-{
- using RPiController::DenoiseMode;
-
- bcm2835_isp_denoise denoise;
- DenoiseMode mode = static_cast<DenoiseMode>(denoiseStatus->mode);
-
- denoise.enabled = mode != DenoiseMode::Off;
- denoise.constant = denoiseStatus->noise_constant;
- denoise.slope.num = 1000 * denoiseStatus->noise_slope;
- denoise.slope.den = 1000;
- denoise.strength.num = 1000 * denoiseStatus->strength;
- denoise.strength.den = 1000;
-
- /* Set the CDN mode to match the SDN operating mode. */
- bcm2835_isp_cdn cdn;
- switch (mode) {
- case DenoiseMode::ColourFast:
- cdn.enabled = 1;
- cdn.mode = CDN_MODE_FAST;
- break;
- case DenoiseMode::ColourHighQuality:
- cdn.enabled = 1;
- cdn.mode = CDN_MODE_HIGH_QUALITY;
- break;
- default:
- cdn.enabled = 0;
- }
-
- ControlValue c(Span<const uint8_t>{ reinterpret_cast<uint8_t *>(&denoise),
- sizeof(denoise) });
- ctrls.set(V4L2_CID_USER_BCM2835_ISP_DENOISE, c);
-
- c = ControlValue(Span<const uint8_t>{ reinterpret_cast<uint8_t *>(&cdn),
- sizeof(cdn) });
- ctrls.set(V4L2_CID_USER_BCM2835_ISP_CDN, c);
-}
-
-void IPARPi::applySharpen(const struct SharpenStatus *sharpenStatus, ControlList &ctrls)
-{
- bcm2835_isp_sharpen sharpen;
-
- sharpen.enabled = 1;
- sharpen.threshold.num = 1000 * sharpenStatus->threshold;
- sharpen.threshold.den = 1000;
- sharpen.strength.num = 1000 * sharpenStatus->strength;
- sharpen.strength.den = 1000;
- sharpen.limit.num = 1000 * sharpenStatus->limit;
- sharpen.limit.den = 1000;
-
- ControlValue c(Span<const uint8_t>{ reinterpret_cast<uint8_t *>(&sharpen),
- sizeof(sharpen) });
- ctrls.set(V4L2_CID_USER_BCM2835_ISP_SHARPEN, c);
-}
-
-void IPARPi::applyDPC(const struct DpcStatus *dpcStatus, ControlList &ctrls)
-{
- bcm2835_isp_dpc dpc;
-
- dpc.enabled = 1;
- dpc.strength = dpcStatus->strength;
-
- ControlValue c(Span<const uint8_t>{ reinterpret_cast<uint8_t *>(&dpc),
- sizeof(dpc) });
- ctrls.set(V4L2_CID_USER_BCM2835_ISP_DPC, c);
-}
-
-void IPARPi::applyLS(const struct AlscStatus *lsStatus, ControlList &ctrls)
-{
- /*
- * Program lens shading tables into pipeline.
- * Choose smallest cell size that won't exceed 63x48 cells.
- */
- const int cellSizes[] = { 16, 32, 64, 128, 256 };
- unsigned int numCells = std::size(cellSizes);
- unsigned int i, w, h, cellSize;
- for (i = 0; i < numCells; i++) {
- cellSize = cellSizes[i];
- w = (mode_.width + cellSize - 1) / cellSize;
- h = (mode_.height + cellSize - 1) / cellSize;
- if (w < 64 && h <= 48)
- break;
- }
-
- if (i == numCells) {
- LOG(IPARPI, Error) << "Cannot find cell size";
- return;
- }
-
- /* We're going to supply corner sampled tables, 16 bit samples. */
- w++, h++;
- bcm2835_isp_lens_shading ls = {
- .enabled = 1,
- .grid_cell_size = cellSize,
- .grid_width = w,
- .grid_stride = w,
- .grid_height = h,
- /* .dmabuf will be filled in by pipeline handler. */
- .dmabuf = 0,
- .ref_transform = 0,
- .corner_sampled = 1,
- .gain_format = GAIN_FORMAT_U4P10
- };
-
- if (!lsTable_ || w * h * 4 * sizeof(uint16_t) > MaxLsGridSize) {
- LOG(IPARPI, Error) << "Do not have a correctly allocate lens shading table!";
- return;
- }
-
- if (lsStatus) {
- /* Format will be u4.10 */
- uint16_t *grid = static_cast<uint16_t *>(lsTable_);
-
- resampleTable(grid, lsStatus->r, w, h);
- resampleTable(grid + w * h, lsStatus->g, w, h);
- std::memcpy(grid + 2 * w * h, grid + w * h, w * h * sizeof(uint16_t));
- resampleTable(grid + 3 * w * h, lsStatus->b, w, h);
- }
-
- ControlValue c(Span<const uint8_t>{ reinterpret_cast<uint8_t *>(&ls),
- sizeof(ls) });
- ctrls.set(V4L2_CID_USER_BCM2835_ISP_LENS_SHADING, c);
-}
-
-/*
- * Resamples a 16x12 table with central sampling to destW x destH with corner
- * sampling.
- */
-void IPARPi::resampleTable(uint16_t dest[], double const src[12][16],
- int destW, int destH)
-{
- /*
- * Precalculate and cache the x sampling locations and phases to
- * save recomputing them on every row.
- */
- assert(destW > 1 && destH > 1 && destW <= 64);
- int xLo[64], xHi[64];
- double xf[64];
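- /*
- * The source table is centre sampled (sample i sits at position i + 0.5),
- * while the destination is corner sampled, hence the -0.5 offset when
- * mapping destination positions back onto the source grid.
- */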
- double x = -0.5, xInc = 16.0 / (destW - 1);
- for (int i = 0; i < destW; i++, x += xInc) {
- xLo[i] = floor(x);
- xf[i] = x - xLo[i];
- xHi[i] = xLo[i] < 15 ? xLo[i] + 1 : 15;
- xLo[i] = xLo[i] > 0 ? xLo[i] : 0;
- }
-
- /* Now march over the output table generating the new values. */
- double y = -0.5, yInc = 12.0 / (destH - 1);
- for (int j = 0; j < destH; j++, y += yInc) {
- int yLo = floor(y);
- double yf = y - yLo;
- int yHi = yLo < 11 ? yLo + 1 : 11;
- yLo = yLo > 0 ? yLo : 0;
- double const *rowAbove = src[yLo];
- double const *rowBelow = src[yHi];
- for (int i = 0; i < destW; i++) {
- double above = rowAbove[xLo[i]] * (1 - xf[i]) + rowAbove[xHi[i]] * xf[i];
- double below = rowBelow[xLo[i]] * (1 - xf[i]) + rowBelow[xHi[i]] * xf[i];
- int result = floor(1024 * (above * (1 - yf) + below * yf) + .5);
- *(dest++) = result > 16383 ? 16383 : result; /* want u4.10 */
- }
- }
-}
-
-} /* namespace ipa::RPi */
-
-/*
- * External IPA module interface
- */
-extern "C" {
-const struct IPAModuleInfo ipaModuleInfo = {
- IPA_MODULE_API_VERSION,
- 1,
- "PipelineHandlerRPi",
- "raspberrypi",
-};
-
-IPAInterface *ipaCreate()
-{
- return new ipa::RPi::IPARPi();
-}
-
-} /* extern "C" */
-
-} /* namespace libcamera */
diff --git a/src/ipa/rkisp1/algorithms/agc.cpp b/src/ipa/rkisp1/algorithms/agc.cpp
index a1bb7d97..50e0690f 100644
--- a/src/ipa/rkisp1/algorithms/agc.cpp
+++ b/src/ipa/rkisp1/algorithms/agc.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021-2022, Ideas On Board
*
- * agc.cpp - AGC/AEC mean-based control algorithm
+ * AGC/AEC mean-based control algorithm
*/
#include "agc.h"
@@ -14,6 +14,7 @@
#include <libcamera/base/log.h>
#include <libcamera/base/utils.h>
+#include <libcamera/control_ids.h>
#include <libcamera/ipa/core_ipa_interface.h>
#include "libipa/histogram.h"
@@ -35,32 +36,32 @@ namespace ipa::rkisp1::algorithms {
LOG_DEFINE_CATEGORY(RkISP1Agc)
-/* Limits for analogue gain values */
-static constexpr double kMinAnalogueGain = 1.0;
-static constexpr double kMaxAnalogueGain = 8.0;
-
-/* \todo Honour the FrameDurationLimits control instead of hardcoding a limit */
-static constexpr utils::Duration kMaxShutterSpeed = 60ms;
-
-/* Number of frames to wait before calculating stats on minimum exposure */
-static constexpr uint32_t kNumStartupFrames = 10;
-
-/* Target value to reach for the top 2% of the histogram */
-static constexpr double kEvGainTarget = 0.5;
+Agc::Agc()
+{
+ supportsRaw_ = true;
+}
-/*
- * Relative luminance target.
+/**
+ * \brief Initialise the AGC algorithm from tuning files
+ * \param[in] context The shared IPA context
+ * \param[in] tuningData The YamlObject containing Agc tuning data
*
- * It's a number that's chosen so that, when the camera points at a grey
- * target, the resulting image brightness is considered right.
+ * This function calls the base class' tuningData parsers to discover which
+ * control values are supported.
*
- * \todo Why is the value different between IPU3 and RkISP1 ?
+ * \return 0 on success or errors from the base class
*/
-static constexpr double kRelativeLuminanceTarget = 0.4;
-
-Agc::Agc()
- : frameCount_(0), numCells_(0), numHistBins_(0), filteredExposure_(0s)
+int Agc::init(IPAContext &context, const YamlObject &tuningData)
{
+ int ret;
+
+ ret = parseTuningData(tuningData);
+ if (ret)
+ return ret;
+
+ context.ctrlMap.merge(controls());
+
+ return 0;
}
/**
@@ -73,21 +74,15 @@ Agc::Agc()
int Agc::configure(IPAContext &context, const IPACameraSensorInfo &configInfo)
{
/* Configure the default exposure and gain. */
- context.frameContext.agc.gain = std::max(context.configuration.agc.minAnalogueGain, kMinAnalogueGain);
- context.frameContext.agc.exposure = 10ms / context.configuration.sensor.lineDuration;
+ context.activeState.agc.automatic.gain = context.configuration.sensor.minAnalogueGain;
+ context.activeState.agc.automatic.exposure =
+ 10ms / context.configuration.sensor.lineDuration;
+ context.activeState.agc.manual.gain = context.activeState.agc.automatic.gain;
+ context.activeState.agc.manual.exposure = context.activeState.agc.automatic.exposure;
+ context.activeState.agc.autoEnabled = !context.configuration.raw;
- /*
- * According to the RkISP1 documentation:
- * - versions < V12 have RKISP1_CIF_ISP_AE_MEAN_MAX_V10 entries,
- * - versions >= V12 have RKISP1_CIF_ISP_AE_MEAN_MAX_V12 entries.
- */
- if (context.configuration.hw.revision < RKISP1_V12) {
- numCells_ = RKISP1_CIF_ISP_AE_MEAN_MAX_V10;
- numHistBins_ = RKISP1_CIF_ISP_HIST_BIN_N_MAX_V10;
- } else {
- numCells_ = RKISP1_CIF_ISP_AE_MEAN_MAX_V12;
- numHistBins_ = RKISP1_CIF_ISP_HIST_BIN_N_MAX_V12;
- }
+ context.activeState.agc.constraintMode = constraintModes().begin()->first;
+ context.activeState.agc.exposureMode = exposureModeHelpers().begin()->first;
/*
* Define the measurement window for AGC as a centered rectangle
@@ -98,131 +93,125 @@ int Agc::configure(IPAContext &context, const IPACameraSensorInfo &configInfo)
context.configuration.agc.measureWindow.h_size = 3 * configInfo.outputSize.width / 4;
context.configuration.agc.measureWindow.v_size = 3 * configInfo.outputSize.height / 4;
- /* \todo Use actual frame index by populating it in the frameContext. */
- frameCount_ = 0;
+ /* \todo Run this again when FrameDurationLimits is passed in */
+ setLimits(context.configuration.sensor.minShutterSpeed,
+ context.configuration.sensor.maxShutterSpeed,
+ context.configuration.sensor.minAnalogueGain,
+ context.configuration.sensor.maxAnalogueGain);
+
+ resetFrameCount();
+
return 0;
}
/**
- * \brief Apply a filter on the exposure value to limit the speed of changes
- * \param[in] exposureValue The target exposure from the AGC algorithm
- *
- * The speed of the filter is adaptive, and will produce the target quicker
- * during startup, or when the target exposure is within 20% of the most recent
- * filter output.
- *
- * \return The filtered exposure
+ * \copydoc libcamera::ipa::Algorithm::queueRequest
*/
-utils::Duration Agc::filterExposure(utils::Duration exposureValue)
+void Agc::queueRequest(IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const ControlList &controls)
{
- double speed = 0.2;
+ auto &agc = context.activeState.agc;
- /* Adapt instantly if we are in startup phase. */
- if (frameCount_ < kNumStartupFrames)
- speed = 1.0;
+ if (!context.configuration.raw) {
+ const auto &agcEnable = controls.get(controls::AeEnable);
+ if (agcEnable && *agcEnable != agc.autoEnabled) {
+ agc.autoEnabled = *agcEnable;
- /*
- * If we are close to the desired result, go faster to avoid making
- * multiple micro-adjustments.
- * \todo Make this customisable?
- */
- if (filteredExposure_ < 1.2 * exposureValue &&
- filteredExposure_ > 0.8 * exposureValue)
- speed = sqrt(speed);
+ LOG(RkISP1Agc, Debug)
+ << (agc.autoEnabled ? "Enabling" : "Disabling")
+ << " AGC";
+ }
+ }
- filteredExposure_ = speed * exposureValue +
- filteredExposure_ * (1.0 - speed);
+ const auto &exposure = controls.get(controls::ExposureTime);
+ if (exposure && !agc.autoEnabled) {
+ agc.manual.exposure = *exposure * 1.0us
+ / context.configuration.sensor.lineDuration;
+
+ LOG(RkISP1Agc, Debug)
+ << "Set exposure to " << agc.manual.exposure;
+ }
+
+ const auto &gain = controls.get(controls::AnalogueGain);
+ if (gain && !agc.autoEnabled) {
+ agc.manual.gain = *gain;
+
+ LOG(RkISP1Agc, Debug) << "Set gain to " << agc.manual.gain;
+ }
- LOG(RkISP1Agc, Debug) << "After filtering, exposure " << filteredExposure_;
+ frameContext.agc.autoEnabled = agc.autoEnabled;
- return filteredExposure_;
+ if (!frameContext.agc.autoEnabled) {
+ frameContext.agc.exposure = agc.manual.exposure;
+ frameContext.agc.gain = agc.manual.gain;
+ }
}
/**
- * \brief Estimate the new exposure and gain values
- * \param[inout] frameContext The shared IPA frame Context
- * \param[in] yGain The gain calculated on the current brightness level
- * \param[in] iqMeanGain The gain calculated based on the relative luminance target
+ * \copydoc libcamera::ipa::Algorithm::prepare
*/
-void Agc::computeExposure(IPAContext &context, double yGain, double iqMeanGain)
+void Agc::prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext, rkisp1_params_cfg *params)
{
- IPASessionConfiguration &configuration = context.configuration;
- IPAFrameContext &frameContext = context.frameContext;
-
- /* Get the effective exposure and gain applied on the sensor. */
- uint32_t exposure = frameContext.sensor.exposure;
- double analogueGain = frameContext.sensor.gain;
-
- /* Use the highest of the two gain estimates. */
- double evGain = std::max(yGain, iqMeanGain);
-
- utils::Duration minShutterSpeed = configuration.agc.minShutterSpeed;
- utils::Duration maxShutterSpeed = std::min(configuration.agc.maxShutterSpeed,
- kMaxShutterSpeed);
-
- double minAnalogueGain = std::max(configuration.agc.minAnalogueGain,
- kMinAnalogueGain);
- double maxAnalogueGain = std::min(configuration.agc.maxAnalogueGain,
- kMaxAnalogueGain);
+ if (frameContext.agc.autoEnabled) {
+ frameContext.agc.exposure = context.activeState.agc.automatic.exposure;
+ frameContext.agc.gain = context.activeState.agc.automatic.gain;
+ }
- /* Consider within 1% of the target as correctly exposed. */
- if (utils::abs_diff(evGain, 1.0) < 0.01)
+ if (frame > 0)
return;
- /* extracted from Rpi::Agc::computeTargetExposure. */
-
- /* Calculate the shutter time in seconds. */
- utils::Duration currentShutter = exposure * configuration.sensor.lineDuration;
-
- /*
- * Update the exposure value for the next computation using the values
- * of exposure and gain really used by the sensor.
- */
- utils::Duration effectiveExposureValue = currentShutter * analogueGain;
-
- LOG(RkISP1Agc, Debug) << "Actual total exposure " << currentShutter * analogueGain
- << " Shutter speed " << currentShutter
- << " Gain " << analogueGain
- << " Needed ev gain " << evGain;
-
- /*
- * Calculate the current exposure value for the scene as the latest
- * exposure value applied multiplied by the new estimated gain.
- */
- utils::Duration exposureValue = effectiveExposureValue * evGain;
+ /* Configure the measurement window. */
+ params->meas.aec_config.meas_window = context.configuration.agc.measureWindow;
+ /* Use a continuous method for measure. */
+ params->meas.aec_config.autostop = RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP_0;
+ /* Estimate Y as (R + G + B) x (85/256). */
+ params->meas.aec_config.mode = RKISP1_CIF_ISP_EXP_MEASURING_MODE_1;
- /* Clamp the exposure value to the min and max authorized. */
- utils::Duration maxTotalExposure = maxShutterSpeed * maxAnalogueGain;
- exposureValue = std::min(exposureValue, maxTotalExposure);
- LOG(RkISP1Agc, Debug) << "Target total exposure " << exposureValue
- << ", maximum is " << maxTotalExposure;
+ params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_AEC;
+ params->module_ens |= RKISP1_CIF_ISP_MODULE_AEC;
+ params->module_en_update |= RKISP1_CIF_ISP_MODULE_AEC;
- /*
- * Divide the exposure value as new exposure and gain values.
- * \todo estimate if we need to desaturate
- */
- exposureValue = filterExposure(exposureValue);
+ /* Configure histogram. */
+ params->meas.hst_config.meas_window = context.configuration.agc.measureWindow;
+ /* Produce the luminance histogram. */
+ params->meas.hst_config.mode = RKISP1_CIF_ISP_HISTOGRAM_MODE_Y_HISTOGRAM;
+ /* Set an average weighted histogram. */
+ Span<uint8_t> weights{
+ params->meas.hst_config.hist_weight,
+ context.hw->numHistogramWeights
+ };
+ std::fill(weights.begin(), weights.end(), 1);
+ /* Step size can't be less than 3. */
+ params->meas.hst_config.histogram_predivider = 4;
- /*
- * Push the shutter time up to the maximum first, and only then
- * increase the gain.
- */
- utils::Duration shutterTime = std::clamp<utils::Duration>(exposureValue / minAnalogueGain,
- minShutterSpeed, maxShutterSpeed);
- double stepGain = std::clamp(exposureValue / shutterTime,
- minAnalogueGain, maxAnalogueGain);
- LOG(RkISP1Agc, Debug) << "Divided up shutter and gain are "
- << shutterTime << " and "
- << stepGain;
+ /* Update the configuration for histogram. */
+ params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_HST;
+ /* Enable the histogram measure unit. */
+ params->module_ens |= RKISP1_CIF_ISP_MODULE_HST;
+ params->module_en_update |= RKISP1_CIF_ISP_MODULE_HST;
+}
- /* Update the estimated exposure and gain. */
- frameContext.agc.exposure = shutterTime / configuration.sensor.lineDuration;
- frameContext.agc.gain = stepGain;
+void Agc::fillMetadata(IPAContext &context, IPAFrameContext &frameContext,
+ ControlList &metadata)
+{
+ utils::Duration exposureTime = context.configuration.sensor.lineDuration
+ * frameContext.sensor.exposure;
+ metadata.set(controls::AnalogueGain, frameContext.sensor.gain);
+ metadata.set(controls::ExposureTime, exposureTime.get<std::micro>());
+
+ /* \todo Use VBlank value calculated from each frame exposure. */
+ uint32_t vTotal = context.configuration.sensor.size.height
+ + context.configuration.sensor.defVBlank;
+ utils::Duration frameDuration = context.configuration.sensor.lineDuration
+ * vTotal;
+ metadata.set(controls::FrameDuration, frameDuration.get<std::micro>());
}
/**
* \brief Estimate the relative luminance of the frame with a given gain
- * \param[in] ae The RkISP1 statistics and ISP results
* \param[in] gain The gain to apply to the frame
*
* This function estimates the average relative luminance of the frame that
@@ -236,8 +225,6 @@ void Agc::computeExposure(IPAContext &context, double yGain, double iqMeanGain)
* YUV doesn't take into account the fact that the R, G and B components
* contribute differently to the relative luminance.
*
- * \todo Have a dedicated YUV algorithm ?
- *
* The values are normalized to the [0.0, 1.0] range, where 1.0 corresponds to a
* theoretical perfect reflector of 100% reference white.
*
@@ -246,113 +233,82 @@ void Agc::computeExposure(IPAContext &context, double yGain, double iqMeanGain)
*
* \return The relative luminance
*/
-double Agc::estimateLuminance(const rkisp1_cif_isp_ae_stat *ae,
- double gain)
+double Agc::estimateLuminance(double gain) const
{
double ySum = 0.0;
/* Sum the averages, saturated to 255. */
- for (unsigned int aeCell = 0; aeCell < numCells_; aeCell++)
- ySum += std::min(ae->exp_mean[aeCell] * gain, 255.0);
+ for (uint8_t expMean : expMeans_)
+ ySum += std::min(expMean * gain, 255.0);
/* \todo Weight with the AWB gains */
- return ySum / numCells_ / 255;
-}
-
-/**
- * \brief Estimate the mean value of the top 2% of the histogram
- * \param[in] hist The histogram statistics computed by the ImgU
- * \return The mean value of the top 2% of the histogram
- */
-double Agc::measureBrightness(const rkisp1_cif_isp_hist_stat *hist) const
-{
- Histogram histogram{ Span<const uint32_t>(hist->hist_bins, numHistBins_) };
- /* Estimate the quantile mean of the top 2% of the histogram. */
- return histogram.interQuantileMean(0.98, 1.0);
+ return ySum / expMeans_.size() / 255;
}
/**
* \brief Process RkISP1 statistics, and run AGC operations
* \param[in] context The shared IPA context
+ * \param[in] frame The frame context sequence number
+ * \param[in] frameContext The current frame context
* \param[in] stats The RKISP1 statistics and ISP results
+ * \param[out] metadata Metadata for the frame, to be filled by the algorithm
*
* Identify the current image brightness, and use that to estimate the optimal
* new exposure and gain for the scene.
*/
-void Agc::process(IPAContext &context,
- [[maybe_unused]] IPAFrameContext *frameContext,
- const rkisp1_stat_buffer *stats)
+void Agc::process(IPAContext &context, [[maybe_unused]] const uint32_t frame,
+ IPAFrameContext &frameContext, const rkisp1_stat_buffer *stats,
+ ControlList &metadata)
{
+ if (!stats) {
+ fillMetadata(context, frameContext, metadata);
+ return;
+ }
+
+ /*
+ * \todo Verify that the exposure and gain applied by the sensor for
+ * this frame match what has been requested. This isn't a hard
+ * requirement for stability of the AGC (the guarantee we need in
+ * automatic mode is a perfect match between the frame and the values
+ * we receive), but is important in manual mode.
+ */
+
const rkisp1_cif_isp_stat *params = &stats->params;
ASSERT(stats->meas_type & RKISP1_CIF_ISP_STAT_AUTOEXP);
- const rkisp1_cif_isp_ae_stat *ae = &params->ae;
- const rkisp1_cif_isp_hist_stat *hist = &params->hist;
-
- double iqMean = measureBrightness(hist);
- double iqMeanGain = kEvGainTarget * numHistBins_ / iqMean;
+ /* The lower 4 bits are fractional and meant to be discarded. */
+ Histogram hist({ params->hist.hist_bins, context.hw->numHistogramBins },
+ [](uint32_t x) { return x >> 4; });
+ expMeans_ = { params->ae.exp_mean, context.hw->numAeCells };
/*
- * Estimate the gain needed to achieve a relative luminance target. To
- * account for non-linearity caused by saturation, the value needs to be
- * estimated in an iterative process, as multiplying by a gain will not
- * increase the relative luminance by the same factor if some image
- * regions are saturated.
+ * The Agc algorithm needs to know the effective exposure value that was
+ * applied to the sensor when the statistics were collected.
*/
- double yGain = 1.0;
- double yTarget = kRelativeLuminanceTarget;
-
- for (unsigned int i = 0; i < 8; i++) {
- double yValue = estimateLuminance(ae, yGain);
- double extra_gain = std::min(10.0, yTarget / (yValue + .001));
-
- yGain *= extra_gain;
- LOG(RkISP1Agc, Debug) << "Y value: " << yValue
- << ", Y target: " << yTarget
- << ", gives gain " << yGain;
- if (extra_gain < 1.01)
- break;
- }
-
- computeExposure(context, yGain, iqMeanGain);
- frameCount_++;
-}
+ utils::Duration exposureTime = context.configuration.sensor.lineDuration
+ * frameContext.sensor.exposure;
+ double analogueGain = frameContext.sensor.gain;
+ utils::Duration effectiveExposureValue = exposureTime * analogueGain;
-/**
- * \copydoc libcamera::ipa::Algorithm::prepare
- */
-void Agc::prepare(IPAContext &context, rkisp1_params_cfg *params)
-{
- if (context.frameContext.frameCount > 0)
- return;
+ utils::Duration shutterTime;
+ double aGain, dGain;
+ std::tie(shutterTime, aGain, dGain) =
+ calculateNewEv(context.activeState.agc.constraintMode,
+ context.activeState.agc.exposureMode,
+ hist, effectiveExposureValue);
- /* Configure the measurement window. */
- params->meas.aec_config.meas_window = context.configuration.agc.measureWindow;
- /* Use a continuous method for measure. */
- params->meas.aec_config.autostop = RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP_0;
- /* Estimate Y as (R + G + B) x (85/256). */
- params->meas.aec_config.mode = RKISP1_CIF_ISP_EXP_MEASURING_MODE_1;
+ LOG(RkISP1Agc, Debug)
+ << "Divided up shutter, analogue gain and digital gain are "
+ << shutterTime << ", " << aGain << " and " << dGain;
- params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_AEC;
- params->module_ens |= RKISP1_CIF_ISP_MODULE_AEC;
- params->module_en_update |= RKISP1_CIF_ISP_MODULE_AEC;
-
- /* Configure histogram. */
- params->meas.hst_config.meas_window = context.configuration.agc.measureWindow;
- /* Produce the luminance histogram. */
- params->meas.hst_config.mode = RKISP1_CIF_ISP_HISTOGRAM_MODE_Y_HISTOGRAM;
- /* Set an average weighted histogram. */
- for (unsigned int histBin = 0; histBin < numHistBins_; histBin++)
- params->meas.hst_config.hist_weight[histBin] = 1;
- /* Step size can't be less than 3. */
- params->meas.hst_config.histogram_predivider = 4;
+ IPAActiveState &activeState = context.activeState;
+ /* Update the estimated exposure and gain. */
+ activeState.agc.automatic.exposure = shutterTime / context.configuration.sensor.lineDuration;
+ activeState.agc.automatic.gain = aGain;
- /* Update the configuration for histogram. */
- params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_HST;
- /* Enable the histogram measure unit. */
- params->module_ens |= RKISP1_CIF_ISP_MODULE_HST;
- params->module_en_update |= RKISP1_CIF_ISP_MODULE_HST;
+ fillMetadata(context, frameContext, metadata);
+ expMeans_ = {};
}
REGISTER_IPA_ALGORITHM(Agc, "Agc")
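The hunk above replaces the open-coded exposure computation with libipa's AgcMeanLuminance::calculateNewEv(). As a reference for the shutter-before-gain policy that was in the removed code, here is a stand-alone sketch; the function name, signature and use of std::chrono in place of utils::Duration are illustrative only and are not part of libcamera:

#include <algorithm>
#include <chrono>
#include <utility>

/*
 * Split a target exposure value (shutter time x analogue gain) so that the
 * shutter time is pushed up to its maximum before the analogue gain is
 * raised, mirroring the logic removed from Agc::computeExposure() above.
 */
std::pair<std::chrono::duration<double>, double>
splitExposure(std::chrono::duration<double> exposureValue,
              std::chrono::duration<double> minShutter,
              std::chrono::duration<double> maxShutter,
              double minGain, double maxGain)
{
        /* Favour a longer shutter time first... */
        auto shutter = std::clamp(exposureValue / minGain, minShutter, maxShutter);
        /* ...and only then increase the analogue gain. */
        double gain = std::clamp(exposureValue / shutter, minGain, maxGain);
        return { shutter, gain };
}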
diff --git a/src/ipa/rkisp1/algorithms/agc.h b/src/ipa/rkisp1/algorithms/agc.h
index 22c02779..04b3247e 100644
--- a/src/ipa/rkisp1/algorithms/agc.h
+++ b/src/ipa/rkisp1/algorithms/agc.h
@@ -2,48 +2,53 @@
/*
* Copyright (C) 2021-2022, Ideas On Board
*
- * agc.h - RkISP1 AGC/AEC mean-based control algorithm
+ * RkISP1 AGC/AEC mean-based control algorithm
*/
#pragma once
#include <linux/rkisp1-config.h>
+#include <libcamera/base/span.h>
#include <libcamera/base/utils.h>
#include <libcamera/geometry.h>
+#include "libipa/agc_mean_luminance.h"
+#include "libipa/histogram.h"
+
#include "algorithm.h"
namespace libcamera {
-struct IPACameraSensorInfo;
-
namespace ipa::rkisp1::algorithms {
-class Agc : public Algorithm
+class Agc : public Algorithm, public AgcMeanLuminance
{
public:
Agc();
~Agc() = default;
+ int init(IPAContext &context, const YamlObject &tuningData) override;
int configure(IPAContext &context, const IPACameraSensorInfo &configInfo) override;
- void prepare(IPAContext &context, rkisp1_params_cfg *params) override;
- void process(IPAContext &context, IPAFrameContext *frameContext,
- const rkisp1_stat_buffer *stats) override;
+ void queueRequest(IPAContext &context,
+ const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const ControlList &controls) override;
+ void prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ rkisp1_params_cfg *params) override;
+ void process(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const rkisp1_stat_buffer *stats,
+ ControlList &metadata) override;
private:
- void computeExposure(IPAContext &Context, double yGain, double iqMeanGain);
- utils::Duration filterExposure(utils::Duration exposureValue);
- double estimateLuminance(const rkisp1_cif_isp_ae_stat *ae, double gain);
- double measureBrightness(const rkisp1_cif_isp_hist_stat *hist) const;
-
- uint64_t frameCount_;
-
- uint32_t numCells_;
- uint32_t numHistBins_;
+ void fillMetadata(IPAContext &context, IPAFrameContext &frameContext,
+ ControlList &metadata);
+ double estimateLuminance(double gain) const override;
- utils::Duration filteredExposure_;
+ Span<const uint8_t> expMeans_;
};
} /* namespace ipa::rkisp1::algorithms */
diff --git a/src/ipa/rkisp1/algorithms/algorithm.h b/src/ipa/rkisp1/algorithms/algorithm.h
index c3212cff..715cfcd8 100644
--- a/src/ipa/rkisp1/algorithms/algorithm.h
+++ b/src/ipa/rkisp1/algorithms/algorithm.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Ideas On Board
*
- * algorithm.h - RkISP1 control algorithm interface
+ * RkISP1 control algorithm interface
*/
#pragma once
@@ -15,7 +15,17 @@ namespace libcamera {
namespace ipa::rkisp1 {
-using Algorithm = libcamera::ipa::Algorithm<Module>;
+class Algorithm : public libcamera::ipa::Algorithm<Module>
+{
+public:
+ Algorithm()
+ : disabled_(false), supportsRaw_(false)
+ {
+ }
+
+ bool disabled_;
+ bool supportsRaw_;
+};
} /* namespace ipa::rkisp1 */
diff --git a/src/ipa/rkisp1/algorithms/awb.cpp b/src/ipa/rkisp1/algorithms/awb.cpp
index 9f00364d..a01fe5d9 100644
--- a/src/ipa/rkisp1/algorithms/awb.cpp
+++ b/src/ipa/rkisp1/algorithms/awb.cpp
@@ -2,16 +2,18 @@
/*
* Copyright (C) 2021-2022, Ideas On Board
*
- * awb.cpp - AWB control algorithm
+ * AWB control algorithm
*/
#include "awb.h"
#include <algorithm>
#include <cmath>
+#include <iomanip>
#include <libcamera/base/log.h>
+#include <libcamera/control_ids.h>
#include <libcamera/ipa/core_ipa_interface.h>
/**
@@ -29,15 +31,27 @@ namespace ipa::rkisp1::algorithms {
LOG_DEFINE_CATEGORY(RkISP1Awb)
+/* Minimum mean value below which AWB can't operate. */
+constexpr double kMeanMinThreshold = 2.0;
+
+Awb::Awb()
+ : rgbMode_(false)
+{
+}
+
/**
* \copydoc libcamera::ipa::Algorithm::configure
*/
int Awb::configure(IPAContext &context,
const IPACameraSensorInfo &configInfo)
{
- context.frameContext.awb.gains.red = 1.0;
- context.frameContext.awb.gains.blue = 1.0;
- context.frameContext.awb.gains.green = 1.0;
+ context.activeState.awb.gains.manual.red = 1.0;
+ context.activeState.awb.gains.manual.blue = 1.0;
+ context.activeState.awb.gains.manual.green = 1.0;
+ context.activeState.awb.gains.automatic.red = 1.0;
+ context.activeState.awb.gains.automatic.blue = 1.0;
+ context.activeState.awb.gains.automatic.green = 1.0;
+ context.activeState.awb.autoEnabled = true;
/*
* Define the measurement window for AWB as a centered rectangle
@@ -48,131 +62,264 @@ int Awb::configure(IPAContext &context,
context.configuration.awb.measureWindow.h_size = 3 * configInfo.outputSize.width / 4;
context.configuration.awb.measureWindow.v_size = 3 * configInfo.outputSize.height / 4;
+ context.configuration.awb.enabled = true;
+
return 0;
}
-uint32_t Awb::estimateCCT(double red, double green, double blue)
+/**
+ * \copydoc libcamera::ipa::Algorithm::queueRequest
+ */
+void Awb::queueRequest(IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const ControlList &controls)
{
- /* Convert the RGB values to CIE tristimulus values (XYZ) */
- double X = (-0.14282) * (red) + (1.54924) * (green) + (-0.95641) * (blue);
- double Y = (-0.32466) * (red) + (1.57837) * (green) + (-0.73191) * (blue);
- double Z = (-0.68202) * (red) + (0.77073) * (green) + (0.56332) * (blue);
+ auto &awb = context.activeState.awb;
- /* Calculate the normalized chromaticity values */
- double x = X / (X + Y + Z);
- double y = Y / (X + Y + Z);
+ const auto &awbEnable = controls.get(controls::AwbEnable);
+ if (awbEnable && *awbEnable != awb.autoEnabled) {
+ awb.autoEnabled = *awbEnable;
- /* Calculate CCT */
- double n = (x - 0.3320) / (0.1858 - y);
- return 449 * n * n * n + 3525 * n * n + 6823.3 * n + 5520.33;
+ LOG(RkISP1Awb, Debug)
+ << (*awbEnable ? "Enabling" : "Disabling") << " AWB";
+ }
+
+ const auto &colourGains = controls.get(controls::ColourGains);
+ if (colourGains && !awb.autoEnabled) {
+ awb.gains.manual.red = (*colourGains)[0];
+ awb.gains.manual.blue = (*colourGains)[1];
+
+ LOG(RkISP1Awb, Debug)
+ << "Set colour gains to red: " << awb.gains.manual.red
+ << ", blue: " << awb.gains.manual.blue;
+ }
+
+ frameContext.awb.autoEnabled = awb.autoEnabled;
+
+ if (!awb.autoEnabled) {
+ frameContext.awb.gains.red = awb.gains.manual.red;
+ frameContext.awb.gains.green = 1.0;
+ frameContext.awb.gains.blue = awb.gains.manual.blue;
+ }
}
/**
* \copydoc libcamera::ipa::Algorithm::prepare
*/
-void Awb::prepare(IPAContext &context, rkisp1_params_cfg *params)
+void Awb::prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext, rkisp1_params_cfg *params)
{
- params->others.awb_gain_config.gain_green_b = 256 * context.frameContext.awb.gains.green;
- params->others.awb_gain_config.gain_blue = 256 * context.frameContext.awb.gains.blue;
- params->others.awb_gain_config.gain_red = 256 * context.frameContext.awb.gains.red;
- params->others.awb_gain_config.gain_green_r = 256 * context.frameContext.awb.gains.green;
+ /*
+	 * This is the latest time we can read the active state; this gives us
+	 * the most up-to-date automatic values we can use.
+ */
+ if (frameContext.awb.autoEnabled) {
+ frameContext.awb.gains.red = context.activeState.awb.gains.automatic.red;
+ frameContext.awb.gains.green = context.activeState.awb.gains.automatic.green;
+ frameContext.awb.gains.blue = context.activeState.awb.gains.automatic.blue;
+ }
+
+ params->others.awb_gain_config.gain_green_b = 256 * frameContext.awb.gains.green;
+ params->others.awb_gain_config.gain_blue = 256 * frameContext.awb.gains.blue;
+ params->others.awb_gain_config.gain_red = 256 * frameContext.awb.gains.red;
+ params->others.awb_gain_config.gain_green_r = 256 * frameContext.awb.gains.green;
/* Update the gains. */
params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_AWB_GAIN;
- /* If we already have configured the gains and window, return. */
- if (context.frameContext.frameCount > 0)
+ /* If we have already set the AWB measurement parameters, return. */
+ if (frame > 0)
return;
- /* Configure the gains to apply. */
+ rkisp1_cif_isp_awb_meas_config &awb_config = params->meas.awb_meas_config;
+
+ /* Configure the measure window for AWB. */
+ awb_config.awb_wnd = context.configuration.awb.measureWindow;
+
+ /* Number of frames to use to estimate the means (0 means 1 frame). */
+ awb_config.frames = 0;
+
+ /* Select RGB or YCbCr means measurement. */
+ if (rgbMode_) {
+ awb_config.awb_mode = RKISP1_CIF_ISP_AWB_MODE_RGB;
+
+ /*
+ * For RGB-based measurements, pixels are selected with maximum
+ * red, green and blue thresholds that are set in the
+	 * awb_ref_cr, min_y and awb_ref_cb fields respectively. The other
+	 * values are not used; set them to 0.
+ */
+ awb_config.awb_ref_cr = 250;
+ awb_config.min_y = 250;
+ awb_config.awb_ref_cb = 250;
+
+ awb_config.max_y = 0;
+ awb_config.min_c = 0;
+ awb_config.max_csum = 0;
+ } else {
+ awb_config.awb_mode = RKISP1_CIF_ISP_AWB_MODE_YCBCR;
+
+ /* Set the reference Cr and Cb (AWB target) to white. */
+ awb_config.awb_ref_cb = 128;
+ awb_config.awb_ref_cr = 128;
+
+ /*
+ * Filter out pixels based on luminance and chrominance values.
+ * The acceptable luma values are specified as a [16, 250]
+ * range, while the acceptable chroma values are specified with
+ * a minimum of 16 and a maximum Cb+Cr sum of 250.
+ */
+ awb_config.min_y = 16;
+ awb_config.max_y = 250;
+ awb_config.min_c = 16;
+ awb_config.max_csum = 250;
+ }
+
+ /* Enable the AWB gains. */
params->module_en_update |= RKISP1_CIF_ISP_MODULE_AWB_GAIN;
- /* Update the ISP to apply the gains configured. */
params->module_ens |= RKISP1_CIF_ISP_MODULE_AWB_GAIN;
- /* Configure the measure window for AWB. */
- params->meas.awb_meas_config.awb_wnd = context.configuration.awb.measureWindow;
- /*
- * Measure Y, Cr and Cb means.
- * \todo RGB is not working, the kernel seems to not configure it ?
- */
- params->meas.awb_meas_config.awb_mode = RKISP1_CIF_ISP_AWB_MODE_YCBCR;
- /* Reference Cr and Cb. */
- params->meas.awb_meas_config.awb_ref_cb = 128;
- params->meas.awb_meas_config.awb_ref_cr = 128;
- /* Y values to include are between min_y and max_y only. */
- params->meas.awb_meas_config.min_y = 16;
- params->meas.awb_meas_config.max_y = 250;
- /* Maximum Cr+Cb value to take into account for awb. */
- params->meas.awb_meas_config.max_csum = 250;
- /* Minimum Cr and Cb values to take into account. */
- params->meas.awb_meas_config.min_c = 16;
- /* Number of frames to use to estimate the mean (0 means 1 frame). */
- params->meas.awb_meas_config.frames = 0;
-
- /* Update AWB measurement unit configuration. */
+ /* Update the AWB measurement parameters and enable the AWB module. */
params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_AWB;
- /* Make sure the ISP is measuring the means for the next frame. */
params->module_en_update |= RKISP1_CIF_ISP_MODULE_AWB;
params->module_ens |= RKISP1_CIF_ISP_MODULE_AWB;
}
+uint32_t Awb::estimateCCT(double red, double green, double blue)
+{
+ /* Convert the RGB values to CIE tristimulus values (XYZ) */
+ double X = (-0.14282) * (red) + (1.54924) * (green) + (-0.95641) * (blue);
+ double Y = (-0.32466) * (red) + (1.57837) * (green) + (-0.73191) * (blue);
+ double Z = (-0.68202) * (red) + (0.77073) * (green) + (0.56332) * (blue);
+
+ /* Calculate the normalized chromaticity values */
+ double x = X / (X + Y + Z);
+ double y = Y / (X + Y + Z);
+
+ /* Calculate CCT */
+ double n = (x - 0.3320) / (0.1858 - y);
+ return 449 * n * n * n + 3525 * n * n + 6823.3 * n + 5520.33;
+}
+
/**
* \copydoc libcamera::ipa::Algorithm::process
*/
-void Awb::process([[maybe_unused]] IPAContext &context,
- [[maybe_unused]] IPAFrameContext *frameCtx,
- const rkisp1_stat_buffer *stats)
+void Awb::process(IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const rkisp1_stat_buffer *stats,
+ ControlList &metadata)
{
const rkisp1_cif_isp_stat *params = &stats->params;
const rkisp1_cif_isp_awb_stat *awb = &params->awb;
- IPAFrameContext &frameContext = context.frameContext;
-
- /* Get the YCbCr mean values */
- double yMean = awb->awb_mean[0].mean_y_or_g;
- double crMean = awb->awb_mean[0].mean_cr_or_r;
- double cbMean = awb->awb_mean[0].mean_cb_or_b;
+ IPAActiveState &activeState = context.activeState;
+ double greenMean;
+ double redMean;
+ double blueMean;
+
+ if (rgbMode_) {
+ greenMean = awb->awb_mean[0].mean_y_or_g;
+ redMean = awb->awb_mean[0].mean_cr_or_r;
+ blueMean = awb->awb_mean[0].mean_cb_or_b;
+ } else {
+ /* Get the YCbCr mean values */
+ double yMean = awb->awb_mean[0].mean_y_or_g;
+ double cbMean = awb->awb_mean[0].mean_cb_or_b;
+ double crMean = awb->awb_mean[0].mean_cr_or_r;
+
+ /*
+ * Convert from YCbCr to RGB.
+ * The hardware uses the following formulas:
+ * Y = 16 + 0.2500 R + 0.5000 G + 0.1094 B
+ * Cb = 128 - 0.1406 R - 0.2969 G + 0.4375 B
+ * Cr = 128 + 0.4375 R - 0.3750 G - 0.0625 B
+ *
+ * The inverse matrix is thus:
+		 * [[1.1636, -0.0623, 1.6008]
+		 * [1.1636, -0.4045, -0.7949]
+		 * [1.1636, 1.9912, -0.0250]]
+ */
+ yMean -= 16;
+ cbMean -= 128;
+ crMean -= 128;
+ redMean = 1.1636 * yMean - 0.0623 * cbMean + 1.6008 * crMean;
+ greenMean = 1.1636 * yMean - 0.4045 * cbMean - 0.7949 * crMean;
+ blueMean = 1.1636 * yMean + 1.9912 * cbMean - 0.0250 * crMean;
+
+ /*
+ * Due to hardware rounding errors in the YCbCr means, the
+ * calculated RGB means may be negative. This would lead to
+ * negative gains, messing up calculation. Prevent this by
+ * clamping the means to positive values.
+ */
+ redMean = std::max(redMean, 0.0);
+ greenMean = std::max(greenMean, 0.0);
+ blueMean = std::max(blueMean, 0.0);
+ }
/*
- * Convert from YCbCr to RGB.
- * The hardware uses the following formulas:
- * Y = 16 + 0.2500 R + 0.5000 G + 0.1094 B
- * Cb = 128 - 0.1406 R - 0.2969 G + 0.4375 B
- * Cr = 128 + 0.4375 R - 0.3750 G - 0.0625 B
- *
- * The inverse matrix is thus:
- * [[1,1636, -0,0623, 1,6008]
- * [1,1636, -0,4045, -0,7949]
- * [1,1636, 1,9912, -0,0250]]
+	 * The ISP computes the AWB means after applying the colour gains, so
+	 * divide by the gains that were applied in order to recover the raw
+	 * means from the sensor.
*/
- yMean -= 16;
- cbMean -= 128;
- crMean -= 128;
- double redMean = 1.1636 * yMean - 0.0623 * cbMean + 1.6008 * crMean;
- double greenMean = 1.1636 * yMean - 0.4045 * cbMean - 0.7949 * crMean;
- double blueMean = 1.1636 * yMean + 1.9912 * cbMean - 0.0250 * crMean;
+ redMean /= frameContext.awb.gains.red;
+ greenMean /= frameContext.awb.gains.green;
+ blueMean /= frameContext.awb.gains.blue;
- /* Estimate the red and blue gains to apply in a grey world. */
- double redGain = greenMean / (redMean + 1);
- double blueGain = greenMean / (blueMean + 1);
+ /*
+ * If the means are too small we don't have enough information to
+ * meaningfully calculate gains. Freeze the algorithm in that case.
+ */
+ if (redMean < kMeanMinThreshold && greenMean < kMeanMinThreshold &&
+ blueMean < kMeanMinThreshold) {
+ frameContext.awb.temperatureK = activeState.awb.temperatureK;
+ return;
+ }
- /* Filter the values to avoid oscillations. */
- double speed = 0.2;
- redGain = speed * redGain + (1 - speed) * frameContext.awb.gains.red;
- blueGain = speed * blueGain + (1 - speed) * frameContext.awb.gains.blue;
+ activeState.awb.temperatureK = estimateCCT(redMean, greenMean, blueMean);
/*
- * Gain values are unsigned integer value, range 0 to 4 with 8 bit
- * fractional part.
+ * Estimate the red and blue gains to apply in a grey world. The green
+ * gain is hardcoded to 1.0. Avoid divisions by zero by clamping the
+ * divisor to a minimum value of 1.0.
*/
- frameContext.awb.gains.red = std::clamp(redGain, 0.0, 1023.0 / 256);
- frameContext.awb.gains.blue = std::clamp(blueGain, 0.0, 1023.0 / 256);
- /* Hardcode the green gain to 1.0. */
- frameContext.awb.gains.green = 1.0;
+ double redGain = greenMean / std::max(redMean, 1.0);
+ double blueGain = greenMean / std::max(blueMean, 1.0);
- frameContext.awb.temperatureK = estimateCCT(redMean, greenMean, blueMean);
+ /*
+	 * Clamp the gain values to the range supported by the hardware, which
+	 * expresses gains as Q2.8 unsigned integer values. Set the minimum just
+	 * above zero to avoid divisions by zero when computing the raw means in
+	 * subsequent iterations.
+ */
+ redGain = std::clamp(redGain, 1.0 / 256, 1023.0 / 256);
+ blueGain = std::clamp(blueGain, 1.0 / 256, 1023.0 / 256);
- LOG(RkISP1Awb, Debug) << "Gain found for red: " << context.frameContext.awb.gains.red
- << " and for blue: " << context.frameContext.awb.gains.blue;
+ /* Filter the values to avoid oscillations. */
+ double speed = 0.2;
+ redGain = speed * redGain + (1 - speed) * activeState.awb.gains.automatic.red;
+ blueGain = speed * blueGain + (1 - speed) * activeState.awb.gains.automatic.blue;
+
+ activeState.awb.gains.automatic.red = redGain;
+ activeState.awb.gains.automatic.blue = blueGain;
+ activeState.awb.gains.automatic.green = 1.0;
+
+ frameContext.awb.temperatureK = activeState.awb.temperatureK;
+
+ metadata.set(controls::AwbEnable, frameContext.awb.autoEnabled);
+ metadata.set(controls::ColourGains, {
+ static_cast<float>(frameContext.awb.gains.red),
+ static_cast<float>(frameContext.awb.gains.blue)
+ });
+ metadata.set(controls::ColourTemperature, frameContext.awb.temperatureK);
+
+ LOG(RkISP1Awb, Debug) << std::showpoint
+ << "Means [" << redMean << ", " << greenMean << ", " << blueMean
+ << "], gains [" << activeState.awb.gains.automatic.red << ", "
+ << activeState.awb.gains.automatic.green << ", "
+ << activeState.awb.gains.automatic.blue << "], temp "
+ << frameContext.awb.temperatureK << "K";
}
REGISTER_IPA_ALGORITHM(Awb, "Awb")
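As a worked example of the grey-world estimation and filtering above (illustrative numbers only): with raw means R = 100, G = 150 and B = 120, the instantaneous gains are redGain = 150 / 100 = 1.5 and blueGain = 150 / 120 = 1.25, both already inside the Q2.8 range [1/256, 1023/256]. With speed = 0.2 and previous automatic gains of 1.0, the filtered values stored in the active state become 0.2 * 1.5 + 0.8 * 1.0 = 1.10 for red and 0.2 * 1.25 + 0.8 * 1.0 = 1.05 for blue, so the gains converge towards the grey-world estimate over several frames rather than jumping immediately.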
diff --git a/src/ipa/rkisp1/algorithms/awb.h b/src/ipa/rkisp1/algorithms/awb.h
index 7647842f..06c92896 100644
--- a/src/ipa/rkisp1/algorithms/awb.h
+++ b/src/ipa/rkisp1/algorithms/awb.h
@@ -2,13 +2,11 @@
/*
* Copyright (C) 2021-2022, Ideas On Board
*
- * awb.h - AWB control algorithm
+ * AWB control algorithm
*/
#pragma once
-#include <linux/rkisp1-config.h>
-
#include "algorithm.h"
namespace libcamera {
@@ -18,16 +16,25 @@ namespace ipa::rkisp1::algorithms {
class Awb : public Algorithm
{
public:
- Awb() = default;
+ Awb();
~Awb() = default;
int configure(IPAContext &context, const IPACameraSensorInfo &configInfo) override;
- void prepare(IPAContext &context, rkisp1_params_cfg *params) override;
- void process(IPAContext &context, IPAFrameContext *frameCtx,
- const rkisp1_stat_buffer *stats) override;
+ void queueRequest(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const ControlList &controls) override;
+ void prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ rkisp1_params_cfg *params) override;
+ void process(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const rkisp1_stat_buffer *stats,
+ ControlList &metadata) override;
private:
uint32_t estimateCCT(double red, double green, double blue);
+
+ bool rgbMode_;
};
} /* namespace ipa::rkisp1::algorithms */
diff --git a/src/ipa/rkisp1/algorithms/blc.cpp b/src/ipa/rkisp1/algorithms/blc.cpp
index 3542f61c..d2e74354 100644
--- a/src/ipa/rkisp1/algorithms/blc.cpp
+++ b/src/ipa/rkisp1/algorithms/blc.cpp
@@ -2,11 +2,15 @@
/*
* Copyright (C) 2021-2022, Ideas On Board
*
- * blc.cpp - RkISP1 Black Level Correction control
+ * RkISP1 Black Level Correction control
*/
#include "blc.h"
+#include <libcamera/base/log.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
/**
* \file blc.h
*/
@@ -29,23 +33,54 @@ namespace ipa::rkisp1::algorithms {
* isn't currently supported.
*/
+LOG_DEFINE_CATEGORY(RkISP1Blc)
+
+BlackLevelCorrection::BlackLevelCorrection()
+ : tuningParameters_(false)
+{
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::init
+ */
+int BlackLevelCorrection::init([[maybe_unused]] IPAContext &context,
+ const YamlObject &tuningData)
+{
+ blackLevelRed_ = tuningData["R"].get<int16_t>(256);
+ blackLevelGreenR_ = tuningData["Gr"].get<int16_t>(256);
+ blackLevelGreenB_ = tuningData["Gb"].get<int16_t>(256);
+ blackLevelBlue_ = tuningData["B"].get<int16_t>(256);
+
+ tuningParameters_ = true;
+
+ LOG(RkISP1Blc, Debug)
+ << "Black levels: red " << blackLevelRed_
+ << ", green (red) " << blackLevelGreenR_
+ << ", green (blue) " << blackLevelGreenB_
+ << ", blue " << blackLevelBlue_;
+
+ return 0;
+}
+
/**
* \copydoc libcamera::ipa::Algorithm::prepare
*/
-void BlackLevelCorrection::prepare(IPAContext &context,
+void BlackLevelCorrection::prepare([[maybe_unused]] IPAContext &context,
+ const uint32_t frame,
+ [[maybe_unused]] IPAFrameContext &frameContext,
rkisp1_params_cfg *params)
{
- if (context.frameContext.frameCount > 0)
+ if (frame > 0)
+ return;
+
+ if (!tuningParameters_)
return;
- /*
- * Substract fixed values taken from imx219 tuning file.
- * \todo Use a configuration file for it ?
- */
+
params->others.bls_config.enable_auto = 0;
- params->others.bls_config.fixed_val.r = 256;
- params->others.bls_config.fixed_val.gr = 256;
- params->others.bls_config.fixed_val.gb = 256;
- params->others.bls_config.fixed_val.b = 256;
+ params->others.bls_config.fixed_val.r = blackLevelRed_;
+ params->others.bls_config.fixed_val.gr = blackLevelGreenR_;
+ params->others.bls_config.fixed_val.gb = blackLevelGreenB_;
+ params->others.bls_config.fixed_val.b = blackLevelBlue_;
params->module_en_update |= RKISP1_CIF_ISP_MODULE_BLS;
params->module_ens |= RKISP1_CIF_ISP_MODULE_BLS;
diff --git a/src/ipa/rkisp1/algorithms/blc.h b/src/ipa/rkisp1/algorithms/blc.h
index 69874d8f..460ebcc1 100644
--- a/src/ipa/rkisp1/algorithms/blc.h
+++ b/src/ipa/rkisp1/algorithms/blc.h
@@ -2,28 +2,34 @@
/*
* Copyright (C) 2021-2022, Ideas On Board
*
- * blc.h - RkISP1 Black Level Correction control
+ * RkISP1 Black Level Correction control
*/
#pragma once
-#include <linux/rkisp1-config.h>
-
#include "algorithm.h"
namespace libcamera {
-struct IPACameraSensorInfo;
-
namespace ipa::rkisp1::algorithms {
class BlackLevelCorrection : public Algorithm
{
public:
- BlackLevelCorrection() = default;
+ BlackLevelCorrection();
~BlackLevelCorrection() = default;
- void prepare(IPAContext &context, rkisp1_params_cfg *params) override;
+ int init(IPAContext &context, const YamlObject &tuningData) override;
+ void prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ rkisp1_params_cfg *params) override;
+
+private:
+ bool tuningParameters_;
+ int16_t blackLevelRed_;
+ int16_t blackLevelGreenR_;
+ int16_t blackLevelGreenB_;
+ int16_t blackLevelBlue_;
};
} /* namespace ipa::rkisp1::algorithms */
diff --git a/src/ipa/rkisp1/algorithms/cproc.cpp b/src/ipa/rkisp1/algorithms/cproc.cpp
new file mode 100644
index 00000000..68bb8180
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/cproc.cpp
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021-2022, Ideas On Board
+ *
+ * RkISP1 Color Processing control
+ */
+
+#include "cproc.h"
+
+#include <algorithm>
+#include <cmath>
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/control_ids.h>
+
+/**
+ * \file cproc.h
+ */
+
+namespace libcamera {
+
+namespace ipa::rkisp1::algorithms {
+
+/**
+ * \class ColorProcessing
+ * \brief RkISP1 Color Processing control
+ *
+ * The ColorProcessing algorithm is responsible for applying brightness,
+ * contrast and saturation corrections. The values are directly provided
+ * through requests by the corresponding controls.
+ */
+
+LOG_DEFINE_CATEGORY(RkISP1CProc)
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::queueRequest
+ */
+void ColorProcessing::queueRequest(IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const ControlList &controls)
+{
+ auto &cproc = context.activeState.cproc;
+ bool update = false;
+
+ const auto &brightness = controls.get(controls::Brightness);
+ if (brightness) {
+ int value = std::clamp<int>(std::lround(*brightness * 128), -128, 127);
+ if (cproc.brightness != value) {
+ cproc.brightness = value;
+ update = true;
+ }
+
+ LOG(RkISP1CProc, Debug) << "Set brightness to " << value;
+ }
+
+ const auto &contrast = controls.get(controls::Contrast);
+ if (contrast) {
+ int value = std::clamp<int>(std::lround(*contrast * 128), 0, 255);
+ if (cproc.contrast != value) {
+ cproc.contrast = value;
+ update = true;
+ }
+
+ LOG(RkISP1CProc, Debug) << "Set contrast to " << value;
+ }
+
+ const auto saturation = controls.get(controls::Saturation);
+ if (saturation) {
+ int value = std::clamp<int>(std::lround(*saturation * 128), 0, 255);
+ if (cproc.saturation != value) {
+ cproc.saturation = value;
+ update = true;
+ }
+
+ LOG(RkISP1CProc, Debug) << "Set saturation to " << value;
+ }
+
+ frameContext.cproc.brightness = cproc.brightness;
+ frameContext.cproc.contrast = cproc.contrast;
+ frameContext.cproc.saturation = cproc.saturation;
+ frameContext.cproc.update = update;
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::prepare
+ */
+void ColorProcessing::prepare([[maybe_unused]] IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ IPAFrameContext &frameContext,
+ rkisp1_params_cfg *params)
+{
+ /* Check if the algorithm configuration has been updated. */
+ if (!frameContext.cproc.update)
+ return;
+
+ params->others.cproc_config.brightness = frameContext.cproc.brightness;
+ params->others.cproc_config.contrast = frameContext.cproc.contrast;
+ params->others.cproc_config.sat = frameContext.cproc.saturation;
+
+ params->module_en_update |= RKISP1_CIF_ISP_MODULE_CPROC;
+ params->module_ens |= RKISP1_CIF_ISP_MODULE_CPROC;
+ params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_CPROC;
+}
+
+REGISTER_IPA_ALGORITHM(ColorProcessing, "ColorProcessing")
+
+} /* namespace ipa::rkisp1::algorithms */
+
+} /* namespace libcamera */
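To make the fixed-point mapping above concrete (illustrative values only, derived from the clamps in queueRequest()): a Brightness of 0.25 is programmed as lround(0.25 * 128) = 32, a Contrast of 1.0 as 128 and a Saturation of 1.5 as 192, with out-of-range requests saturating at [-128, 127] for brightness and [0, 255] for contrast and saturation.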
diff --git a/src/ipa/rkisp1/algorithms/cproc.h b/src/ipa/rkisp1/algorithms/cproc.h
new file mode 100644
index 00000000..bafba5cc
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/cproc.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021-2022, Ideas On Board
+ *
+ * RkISP1 Color Processing control
+ */
+
+#pragma once
+
+#include <sys/types.h>
+
+#include "algorithm.h"
+
+namespace libcamera {
+
+namespace ipa::rkisp1::algorithms {
+
+class ColorProcessing : public Algorithm
+{
+public:
+ ColorProcessing() = default;
+ ~ColorProcessing() = default;
+
+ void queueRequest(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const ControlList &controls) override;
+ void prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ rkisp1_params_cfg *params) override;
+};
+
+} /* namespace ipa::rkisp1::algorithms */
+} /* namespace libcamera */
diff --git a/src/ipa/rkisp1/algorithms/dpcc.cpp b/src/ipa/rkisp1/algorithms/dpcc.cpp
new file mode 100644
index 00000000..b5a339e9
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/dpcc.cpp
@@ -0,0 +1,251 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021-2022, Ideas On Board
+ *
+ * RkISP1 Defect Pixel Cluster Correction control
+ */
+
+#include "dpcc.h"
+
+#include <libcamera/base/log.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+#include "linux/rkisp1-config.h"
+
+/**
+ * \file dpcc.h
+ */
+
+namespace libcamera {
+
+namespace ipa::rkisp1::algorithms {
+
+/**
+ * \class DefectPixelClusterCorrection
+ * \brief RkISP1 Defect Pixel Cluster Correction control
+ *
+ * Depending on the sensor quality, some pixels can be defective and thus
+ * appear significantly brighter or darker than the other pixels.
+ *
+ * The Defect Pixel Cluster Correction algorithm is responsible for minimising
+ * the impact of these pixels. This can be done either with corrections applied
+ * at run time (the on-the-fly method) or with a table of defective pixels.
+ * Only the first method is currently supported.
+ */
+
+LOG_DEFINE_CATEGORY(RkISP1Dpcc)
+
+DefectPixelClusterCorrection::DefectPixelClusterCorrection()
+ : config_({})
+{
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::init
+ */
+int DefectPixelClusterCorrection::init([[maybe_unused]] IPAContext &context,
+ const YamlObject &tuningData)
+{
+ config_.mode = RKISP1_CIF_ISP_DPCC_MODE_STAGE1_ENABLE;
+ config_.output_mode = RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_STAGE1_INCL_G_CENTER
+ | RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_STAGE1_INCL_RB_CENTER;
+
+ config_.set_use = tuningData["fixed-set"].get<bool>(false)
+ ? RKISP1_CIF_ISP_DPCC_SET_USE_STAGE1_USE_FIX_SET : 0;
+
+ /* Get all defined sets to apply (up to 3). */
+ const YamlObject &setsObject = tuningData["sets"];
+ if (!setsObject.isList()) {
+ LOG(RkISP1Dpcc, Error)
+ << "'sets' parameter not found in tuning file";
+ return -EINVAL;
+ }
+
+ if (setsObject.size() > RKISP1_CIF_ISP_DPCC_METHODS_MAX) {
+ LOG(RkISP1Dpcc, Error)
+ << "'sets' size in tuning file (" << setsObject.size()
+ << ") exceeds the maximum hardware capacity (3)";
+ return -EINVAL;
+ }
+
+ for (std::size_t i = 0; i < setsObject.size(); ++i) {
+ struct rkisp1_cif_isp_dpcc_methods_config &method = config_.methods[i];
+ const YamlObject &set = setsObject[i];
+ uint16_t value;
+
+ /* Enable set if described in YAML tuning file. */
+ config_.set_use |= 1 << i;
+
+ /* PG Method */
+ const YamlObject &pgObject = set["pg-factor"];
+
+ if (pgObject.contains("green")) {
+ method.method |=
+ RKISP1_CIF_ISP_DPCC_METHODS_SET_PG_GREEN_ENABLE;
+
+ value = pgObject["green"].get<uint16_t>(0);
+ method.pg_fac |= RKISP1_CIF_ISP_DPCC_PG_FAC_G(value);
+ }
+
+ if (pgObject.contains("red-blue")) {
+ method.method |=
+ RKISP1_CIF_ISP_DPCC_METHODS_SET_PG_RED_BLUE_ENABLE;
+
+ value = pgObject["red-blue"].get<uint16_t>(0);
+ method.pg_fac |= RKISP1_CIF_ISP_DPCC_PG_FAC_RB(value);
+ }
+
+ /* RO Method */
+ const YamlObject &roObject = set["ro-limits"];
+
+ if (roObject.contains("green")) {
+ method.method |=
+ RKISP1_CIF_ISP_DPCC_METHODS_SET_RO_GREEN_ENABLE;
+
+ value = roObject["green"].get<uint16_t>(0);
+ config_.ro_limits |=
+ RKISP1_CIF_ISP_DPCC_RO_LIMITS_n_G(i, value);
+ }
+
+ if (roObject.contains("red-blue")) {
+ method.method |=
+ RKISP1_CIF_ISP_DPCC_METHODS_SET_RO_RED_BLUE_ENABLE;
+
+ value = roObject["red-blue"].get<uint16_t>(0);
+ config_.ro_limits |=
+ RKISP1_CIF_ISP_DPCC_RO_LIMITS_n_RB(i, value);
+ }
+
+ /* RG Method */
+ const YamlObject &rgObject = set["rg-factor"];
+ method.rg_fac = 0;
+
+ if (rgObject.contains("green")) {
+ method.method |=
+ RKISP1_CIF_ISP_DPCC_METHODS_SET_RG_GREEN_ENABLE;
+
+ value = rgObject["green"].get<uint16_t>(0);
+ method.rg_fac |= RKISP1_CIF_ISP_DPCC_RG_FAC_G(value);
+ }
+
+ if (rgObject.contains("red-blue")) {
+ method.method |=
+ RKISP1_CIF_ISP_DPCC_METHODS_SET_RG_RED_BLUE_ENABLE;
+
+ value = rgObject["red-blue"].get<uint16_t>(0);
+ method.rg_fac |= RKISP1_CIF_ISP_DPCC_RG_FAC_RB(value);
+ }
+
+ /* RND Method */
+ const YamlObject &rndOffsetsObject = set["rnd-offsets"];
+
+ if (rndOffsetsObject.contains("green")) {
+ method.method |=
+ RKISP1_CIF_ISP_DPCC_METHODS_SET_RND_GREEN_ENABLE;
+
+ value = rndOffsetsObject["green"].get<uint16_t>(0);
+ config_.rnd_offs |=
+ RKISP1_CIF_ISP_DPCC_RND_OFFS_n_G(i, value);
+ }
+
+ if (rndOffsetsObject.contains("red-blue")) {
+ method.method |=
+ RKISP1_CIF_ISP_DPCC_METHODS_SET_RND_RED_BLUE_ENABLE;
+
+ value = rndOffsetsObject["red-blue"].get<uint16_t>(0);
+ config_.rnd_offs |=
+ RKISP1_CIF_ISP_DPCC_RND_OFFS_n_RB(i, value);
+ }
+
+ const YamlObject &rndThresholdObject = set["rnd-threshold"];
+ method.rnd_thresh = 0;
+
+ if (rndThresholdObject.contains("green")) {
+ method.method |=
+ RKISP1_CIF_ISP_DPCC_METHODS_SET_RND_GREEN_ENABLE;
+
+ value = rndThresholdObject["green"].get<uint16_t>(0);
+ method.rnd_thresh |=
+ RKISP1_CIF_ISP_DPCC_RND_THRESH_G(value);
+ }
+
+ if (rndThresholdObject.contains("red-blue")) {
+ method.method |=
+ RKISP1_CIF_ISP_DPCC_METHODS_SET_RND_RED_BLUE_ENABLE;
+
+ value = rndThresholdObject["red-blue"].get<uint16_t>(0);
+ method.rnd_thresh |=
+ RKISP1_CIF_ISP_DPCC_RND_THRESH_RB(value);
+ }
+
+ /* LC Method */
+ const YamlObject &lcThresholdObject = set["line-threshold"];
+ method.line_thresh = 0;
+
+ if (lcThresholdObject.contains("green")) {
+ method.method |=
+ RKISP1_CIF_ISP_DPCC_METHODS_SET_LC_GREEN_ENABLE;
+
+ value = lcThresholdObject["green"].get<uint16_t>(0);
+ method.line_thresh |=
+ RKISP1_CIF_ISP_DPCC_LINE_THRESH_G(value);
+ }
+
+ if (lcThresholdObject.contains("red-blue")) {
+ method.method |=
+ RKISP1_CIF_ISP_DPCC_METHODS_SET_LC_RED_BLUE_ENABLE;
+
+ value = lcThresholdObject["red-blue"].get<uint16_t>(0);
+ method.line_thresh |=
+ RKISP1_CIF_ISP_DPCC_LINE_THRESH_RB(value);
+ }
+
+ const YamlObject &lcTMadFactorObject = set["line-mad-factor"];
+ method.line_mad_fac = 0;
+
+ if (lcTMadFactorObject.contains("green")) {
+ method.method |=
+ RKISP1_CIF_ISP_DPCC_METHODS_SET_LC_GREEN_ENABLE;
+
+ value = lcTMadFactorObject["green"].get<uint16_t>(0);
+ method.line_mad_fac |=
+ RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_G(value);
+ }
+
+ if (lcTMadFactorObject.contains("red-blue")) {
+ method.method |=
+ RKISP1_CIF_ISP_DPCC_METHODS_SET_LC_RED_BLUE_ENABLE;
+
+ value = lcTMadFactorObject["red-blue"].get<uint16_t>(0);
+ method.line_mad_fac |=
+ RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_RB(value);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::prepare
+ */
+void DefectPixelClusterCorrection::prepare([[maybe_unused]] IPAContext &context,
+ const uint32_t frame,
+ [[maybe_unused]] IPAFrameContext &frameContext,
+ rkisp1_params_cfg *params)
+{
+ if (frame > 0)
+ return;
+
+ params->others.dpcc_config = config_;
+
+ params->module_en_update |= RKISP1_CIF_ISP_MODULE_DPCC;
+ params->module_ens |= RKISP1_CIF_ISP_MODULE_DPCC;
+ params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_DPCC;
+}
+
+REGISTER_IPA_ALGORITHM(DefectPixelClusterCorrection, "DefectPixelClusterCorrection")
+
+} /* namespace ipa::rkisp1::algorithms */
+
+} /* namespace libcamera */
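For reference, the set_use accumulation above produces a simple bitmask (illustrative walk-through): with two sets listed under 'sets' in the tuning file and fixed-set left at its default of false, set_use starts at 0 and the loop ORs in (1 << 0) and (1 << 1), giving 0x3. The per-set green and red-blue limits are likewise folded into the shared ro_limits and rnd_offs registers through the index-parameterised RKISP1_CIF_ISP_DPCC_*_n_* macros.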
diff --git a/src/ipa/rkisp1/algorithms/dpcc.h b/src/ipa/rkisp1/algorithms/dpcc.h
new file mode 100644
index 00000000..d39b7bed
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/dpcc.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021-2022, Ideas On Board
+ *
+ * RkISP1 Defect Pixel Cluster Correction control
+ */
+
+#pragma once
+
+#include "algorithm.h"
+
+namespace libcamera {
+
+namespace ipa::rkisp1::algorithms {
+
+class DefectPixelClusterCorrection : public Algorithm
+{
+public:
+ DefectPixelClusterCorrection();
+ ~DefectPixelClusterCorrection() = default;
+
+ int init(IPAContext &context, const YamlObject &tuningData) override;
+ void prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ rkisp1_params_cfg *params) override;
+
+private:
+ rkisp1_cif_isp_dpcc_config config_;
+};
+
+} /* namespace ipa::rkisp1::algorithms */
+} /* namespace libcamera */
diff --git a/src/ipa/rkisp1/algorithms/dpf.cpp b/src/ipa/rkisp1/algorithms/dpf.cpp
new file mode 100644
index 00000000..abf95728
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/dpf.cpp
@@ -0,0 +1,260 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021-2022, Ideas On Board
+ *
+ * RkISP1 Denoise Pre-Filter control
+ */
+
+#include "dpf.h"
+
+#include <cmath>
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/control_ids.h>
+
+#include "linux/rkisp1-config.h"
+
+/**
+ * \file dpf.h
+ */
+
+namespace libcamera {
+
+namespace ipa::rkisp1::algorithms {
+
+/**
+ * \class Dpf
+ * \brief RkISP1 Denoise Pre-Filter control
+ *
+ * The denoise pre-filter algorithm is a bilateral filter which combines a
+ * range filter and a domain filter. The denoise pre-filter is applied before
+ * demosaicing.
+ */
+
+LOG_DEFINE_CATEGORY(RkISP1Dpf)
+
+Dpf::Dpf()
+ : config_({}), strengthConfig_({})
+{
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::init
+ */
+int Dpf::init([[maybe_unused]] IPAContext &context,
+ const YamlObject &tuningData)
+{
+ std::vector<uint8_t> values;
+
+ /*
+ * The domain kernel is configured with a 9x9 kernel for the green
+ * pixels, and a 13x9 or 9x9 kernel for red and blue pixels.
+ */
+ const YamlObject &dFObject = tuningData["DomainFilter"];
+
+ /*
+ * For the green component, we have the 9x9 kernel specified
+ * as 6 coefficients:
+ * Y
+ * ^
+ * 4 | 6 5 4 5 6
+ * 3 | 5 3 3 5
+ * 2 | 5 3 2 3 5
+ * 1 | 3 1 1 3
+ * 0 - 4 2 0 2 4
+ * -1 | 3 1 1 3
+ * -2 | 5 3 2 3 5
+ * -3 | 5 3 3 5
+ * -4 | 6 5 4 5 6
+ * +---------|--------> X
+ * -4....-1 0 1 2 3 4
+ */
+ values = dFObject["g"].getList<uint8_t>().value_or(std::vector<uint8_t>{});
+ if (values.size() != RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS) {
+ LOG(RkISP1Dpf, Error)
+ << "Invalid 'DomainFilter:g': expected "
+ << RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS
+ << " elements, got " << values.size();
+ return -EINVAL;
+ }
+
+ std::copy_n(values.begin(), values.size(),
+ std::begin(config_.g_flt.spatial_coeff));
+
+ config_.g_flt.gr_enable = true;
+ config_.g_flt.gb_enable = true;
+
+ /*
+ * For the red and blue components, we have the 13x9 kernel specified
+ * as 6 coefficients:
+ *
+ * Y
+ * ^
+ * 4 | 6 5 4 3 4 5 6
+ * |
+ * 2 | 5 4 2 1 2 4 5
+ * |
+ * 0 - 5 3 1 0 1 3 5
+ * |
+ * -2 | 5 4 2 1 2 4 5
+ * |
+ * -4 | 6 5 4 3 4 5 6
+ * +-------------|------------> X
+ * -6 -4 -2 0 2 4 6
+ *
+ * For a 9x9 kernel, columns -6 and 6 are dropped, so coefficient
+ * number 6 is not used.
+ */
+ values = dFObject["rb"].getList<uint8_t>().value_or(std::vector<uint8_t>{});
+ if (values.size() != RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS &&
+ values.size() != RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS - 1) {
+ LOG(RkISP1Dpf, Error)
+ << "Invalid 'DomainFilter:rb': expected "
+ << RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS - 1
+ << " or " << RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS
+ << " elements, got " << values.size();
+ return -EINVAL;
+ }
+
+ config_.rb_flt.fltsize = values.size() == RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS
+ ? RKISP1_CIF_ISP_DPF_RB_FILTERSIZE_13x9
+ : RKISP1_CIF_ISP_DPF_RB_FILTERSIZE_9x9;
+
+ std::copy_n(values.begin(), values.size(),
+ std::begin(config_.rb_flt.spatial_coeff));
+
+ config_.rb_flt.r_enable = true;
+ config_.rb_flt.b_enable = true;
+
+ /*
+ * The range kernel is configured with a noise level lookup table (NLL)
+ * which stores a piecewise linear function that characterizes the
+ * sensor noise profile as a noise level function curve (NLF).
+ */
+ const YamlObject &rFObject = tuningData["NoiseLevelFunction"];
+
+ std::vector<uint16_t> nllValues;
+ nllValues = rFObject["coeff"].getList<uint16_t>().value_or(std::vector<uint16_t>{});
+ if (nllValues.size() != RKISP1_CIF_ISP_DPF_MAX_NLF_COEFFS) {
+ LOG(RkISP1Dpf, Error)
+ << "Invalid 'RangeFilter:coeff': expected "
+ << RKISP1_CIF_ISP_DPF_MAX_NLF_COEFFS
+ << " elements, got " << nllValues.size();
+ return -EINVAL;
+ }
+
+ std::copy_n(nllValues.begin(), nllValues.size(),
+ std::begin(config_.nll.coeff));
+
+ std::string scaleMode = rFObject["scale-mode"].get<std::string>("");
+ if (scaleMode == "linear") {
+ config_.nll.scale_mode = RKISP1_CIF_ISP_NLL_SCALE_LINEAR;
+ } else if (scaleMode == "logarithmic") {
+ config_.nll.scale_mode = RKISP1_CIF_ISP_NLL_SCALE_LOGARITHMIC;
+ } else {
+ LOG(RkISP1Dpf, Error)
+ << "Invalid 'RangeFilter:scale-mode': expected "
+ << "'linear' or 'logarithmic' value, got "
+ << scaleMode;
+ return -EINVAL;
+ }
+
+ const YamlObject &fSObject = tuningData["FilterStrength"];
+
+ strengthConfig_.r = fSObject["r"].get<uint16_t>(64);
+ strengthConfig_.g = fSObject["g"].get<uint16_t>(64);
+ strengthConfig_.b = fSObject["b"].get<uint16_t>(64);
+
+ return 0;
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::queueRequest
+ */
+void Dpf::queueRequest(IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const ControlList &controls)
+{
+ auto &dpf = context.activeState.dpf;
+ bool update = false;
+
+ const auto &denoise = controls.get(controls::draft::NoiseReductionMode);
+ if (denoise) {
+ LOG(RkISP1Dpf, Debug) << "Set denoise to " << *denoise;
+
+ switch (*denoise) {
+ case controls::draft::NoiseReductionModeOff:
+ if (dpf.denoise) {
+ dpf.denoise = false;
+ update = true;
+ }
+ break;
+ case controls::draft::NoiseReductionModeMinimal:
+ case controls::draft::NoiseReductionModeHighQuality:
+ case controls::draft::NoiseReductionModeFast:
+ if (!dpf.denoise) {
+ dpf.denoise = true;
+ update = true;
+ }
+ break;
+ default:
+ LOG(RkISP1Dpf, Error)
+ << "Unsupported denoise value "
+ << *denoise;
+ break;
+ }
+ }
+
+ frameContext.dpf.denoise = dpf.denoise;
+ frameContext.dpf.update = update;
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::prepare
+ */
+void Dpf::prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext, rkisp1_params_cfg *params)
+{
+ if (frame == 0) {
+ params->others.dpf_config = config_;
+ params->others.dpf_strength_config = strengthConfig_;
+
+ const auto &awb = context.configuration.awb;
+ const auto &lsc = context.configuration.lsc;
+ auto &mode = params->others.dpf_config.gain.mode;
+
+ /*
+ * The DPF needs to take into account the total amount of
+ * digital gain, which comes from the AWB and LSC modules. The
+ * DPF hardware can be programmed with a digital gain value
+ * manually, but can also use the gains supplied by the AWB and
+ * LSC modules automatically when they are enabled. Use that
+ * mode of operation as it simplifies control of the DPF.
+ */
+ if (awb.enabled && lsc.enabled)
+ mode = RKISP1_CIF_ISP_DPF_GAIN_USAGE_AWB_LSC_GAINS;
+ else if (awb.enabled)
+ mode = RKISP1_CIF_ISP_DPF_GAIN_USAGE_AWB_GAINS;
+ else if (lsc.enabled)
+ mode = RKISP1_CIF_ISP_DPF_GAIN_USAGE_LSC_GAINS;
+ else
+ mode = RKISP1_CIF_ISP_DPF_GAIN_USAGE_DISABLED;
+
+ params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_DPF |
+ RKISP1_CIF_ISP_MODULE_DPF_STRENGTH;
+ }
+
+ if (frameContext.dpf.update) {
+ params->module_en_update |= RKISP1_CIF_ISP_MODULE_DPF;
+ if (frameContext.dpf.denoise)
+ params->module_ens |= RKISP1_CIF_ISP_MODULE_DPF;
+ }
+}
+
+REGISTER_IPA_ALGORITHM(Dpf, "Dpf")
+
+} /* namespace ipa::rkisp1::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/rkisp1/algorithms/dpf.h b/src/ipa/rkisp1/algorithms/dpf.h
new file mode 100644
index 00000000..da0115ba
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/dpf.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021-2022, Ideas On Board
+ *
+ * RkISP1 Denoise Pre-Filter control
+ */
+
+#pragma once
+
+#include <sys/types.h>
+
+#include "algorithm.h"
+
+namespace libcamera {
+
+namespace ipa::rkisp1::algorithms {
+
+class Dpf : public Algorithm
+{
+public:
+ Dpf();
+ ~Dpf() = default;
+
+ int init(IPAContext &context, const YamlObject &tuningData) override;
+ void queueRequest(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const ControlList &controls) override;
+ void prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ rkisp1_params_cfg *params) override;
+
+private:
+ struct rkisp1_cif_isp_dpf_config config_;
+ struct rkisp1_cif_isp_dpf_strength_config strengthConfig_;
+};
+
+} /* namespace ipa::rkisp1::algorithms */
+} /* namespace libcamera */
diff --git a/src/ipa/rkisp1/algorithms/filter.cpp b/src/ipa/rkisp1/algorithms/filter.cpp
new file mode 100644
index 00000000..9752248a
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/filter.cpp
@@ -0,0 +1,216 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021-2022, Ideas On Board
+ *
+ * RkISP1 Filter control
+ */
+
+#include "filter.h"
+
+#include <cmath>
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/control_ids.h>
+
+/**
+ * \file filter.h
+ */
+
+namespace libcamera {
+
+namespace ipa::rkisp1::algorithms {
+
+/**
+ * \class Filter
+ * \brief RkISP1 Filter control
+ *
+ * Denoise and Sharpness filters will be applied by RkISP1 during the
+ * demosaicing step. The denoise filter is responsible for removing noise from
+ * the image, while the sharpness filter will enhance its acutance.
+ *
+ * \todo In the current version, the denoise and sharpness controls are based
+ * on user controls. In a future version they should be handled automatically
+ * by the algorithm.
+ */
+
+LOG_DEFINE_CATEGORY(RkISP1Filter)
+
+static constexpr uint32_t kFiltLumWeightDefault = 0x00022040;
+static constexpr uint32_t kFiltModeDefault = 0x000004f2;
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::queueRequest
+ */
+void Filter::queueRequest(IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const ControlList &controls)
+{
+ auto &filter = context.activeState.filter;
+ bool update = false;
+
+ const auto &sharpness = controls.get(controls::Sharpness);
+ if (sharpness) {
+ unsigned int value = std::round(std::clamp(*sharpness, 0.0f, 10.0f));
+
+ if (filter.sharpness != value) {
+ filter.sharpness = value;
+ update = true;
+ }
+
+ LOG(RkISP1Filter, Debug) << "Set sharpness to " << *sharpness;
+ }
+
+ const auto &denoise = controls.get(controls::draft::NoiseReductionMode);
+ if (denoise) {
+ LOG(RkISP1Filter, Debug) << "Set denoise to " << *denoise;
+
+ switch (*denoise) {
+ case controls::draft::NoiseReductionModeOff:
+ if (filter.denoise != 0) {
+ filter.denoise = 0;
+ update = true;
+ }
+ break;
+ case controls::draft::NoiseReductionModeMinimal:
+ if (filter.denoise != 1) {
+ filter.denoise = 1;
+ update = true;
+ }
+ break;
+ case controls::draft::NoiseReductionModeHighQuality:
+ case controls::draft::NoiseReductionModeFast:
+ if (filter.denoise != 3) {
+ filter.denoise = 3;
+ update = true;
+ }
+ break;
+ default:
+ LOG(RkISP1Filter, Error)
+ << "Unsupported denoise value "
+ << *denoise;
+ break;
+ }
+ }
+
+ frameContext.filter.denoise = filter.denoise;
+ frameContext.filter.sharpness = filter.sharpness;
+ frameContext.filter.update = update;
+}
+
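The denoise and sharpness levels consumed by queueRequest() above come straight from the request controls. A minimal, hypothetical sketch of the application side (request is assumed to be a valid libcamera::Request obtained from a configured camera; error handling omitted):

    #include <libcamera/control_ids.h>
    #include <libcamera/controls.h>
    #include <libcamera/request.h>

    /* Ask for mid-level sharpening and high-quality noise reduction. */
    void setFilterControls(libcamera::Request *request)
    {
            libcamera::ControlList &controls = request->controls();

            /* Clamped to [0, 10] and used as a table index by Filter. */
            controls.set(libcamera::controls::Sharpness, 3.0f);

            /* Mapped to denoise level 3 by Filter::queueRequest(). */
            controls.set(libcamera::controls::draft::NoiseReductionMode,
                         libcamera::controls::draft::NoiseReductionModeHighQuality);
    }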
+/**
+ * \copydoc libcamera::ipa::Algorithm::prepare
+ */
+void Filter::prepare([[maybe_unused]] IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ IPAFrameContext &frameContext, rkisp1_params_cfg *params)
+{
+ /* Check if the algorithm configuration has been updated. */
+ if (!frameContext.filter.update)
+ return;
+
+ static constexpr uint16_t filt_fac_sh0[] = {
+ 0x04, 0x07, 0x0a, 0x0c, 0x10, 0x14, 0x1a, 0x1e, 0x24, 0x2a, 0x30
+ };
+
+ static constexpr uint16_t filt_fac_sh1[] = {
+ 0x04, 0x08, 0x0c, 0x10, 0x16, 0x1b, 0x20, 0x26, 0x2c, 0x30, 0x3f
+ };
+
+ static constexpr uint16_t filt_fac_mid[] = {
+ 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x10, 0x13, 0x17, 0x1d, 0x22, 0x28
+ };
+
+ static constexpr uint16_t filt_fac_bl0[] = {
+ 0x02, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x10, 0x15, 0x1a, 0x24
+ };
+
+ static constexpr uint16_t filt_fac_bl1[] = {
+ 0x00, 0x00, 0x00, 0x02, 0x04, 0x04, 0x06, 0x08, 0x0d, 0x14, 0x20
+ };
+
+ static constexpr uint16_t filt_thresh_sh0[] = {
+ 0, 18, 26, 36, 41, 75, 90, 120, 170, 250, 1023
+ };
+
+ static constexpr uint16_t filt_thresh_sh1[] = {
+ 0, 33, 44, 51, 67, 100, 120, 150, 200, 300, 1023
+ };
+
+ static constexpr uint16_t filt_thresh_bl0[] = {
+ 0, 8, 13, 23, 26, 50, 60, 80, 140, 180, 1023
+ };
+
+ static constexpr uint16_t filt_thresh_bl1[] = {
+ 0, 2, 5, 10, 15, 20, 26, 51, 100, 150, 1023
+ };
+
+ static constexpr uint16_t stage1_select[] = {
+ 6, 6, 4, 4, 3, 3, 2, 2, 2, 1, 0
+ };
+
+ static constexpr uint16_t filt_chr_v_mode[] = {
+ 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3
+ };
+
+ static constexpr uint16_t filt_chr_h_mode[] = {
+ 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3
+ };
+
+ uint8_t denoise = frameContext.filter.denoise;
+ uint8_t sharpness = frameContext.filter.sharpness;
+ auto &flt_config = params->others.flt_config;
+
+ flt_config.fac_sh0 = filt_fac_sh0[sharpness];
+ flt_config.fac_sh1 = filt_fac_sh1[sharpness];
+ flt_config.fac_mid = filt_fac_mid[sharpness];
+ flt_config.fac_bl0 = filt_fac_bl0[sharpness];
+ flt_config.fac_bl1 = filt_fac_bl1[sharpness];
+
+ flt_config.lum_weight = kFiltLumWeightDefault;
+ flt_config.mode = kFiltModeDefault;
+ flt_config.thresh_sh0 = filt_thresh_sh0[denoise];
+ flt_config.thresh_sh1 = filt_thresh_sh1[denoise];
+ flt_config.thresh_bl0 = filt_thresh_bl0[denoise];
+ flt_config.thresh_bl1 = filt_thresh_bl1[denoise];
+ flt_config.grn_stage1 = stage1_select[denoise];
+ flt_config.chr_v_mode = filt_chr_v_mode[denoise];
+ flt_config.chr_h_mode = filt_chr_h_mode[denoise];
+
+ /*
+ * Combined high denoising and high sharpening requires some
+ * adjustments to the configuration of the filters. A first stage
+ * filter with a lower strength must be selected, and the blur factors
+ * must be decreased.
+ */
+ if (denoise == 9) {
+ if (sharpness > 3)
+ flt_config.grn_stage1 = 2;
+ } else if (denoise == 10) {
+ if (sharpness > 5)
+ flt_config.grn_stage1 = 2;
+ else if (sharpness > 3)
+ flt_config.grn_stage1 = 1;
+ }
+
+ if (denoise > 7) {
+ if (sharpness > 7) {
+ flt_config.fac_bl0 /= 2;
+ flt_config.fac_bl1 /= 4;
+ } else if (sharpness > 4) {
+ flt_config.fac_bl0 = flt_config.fac_bl0 * 3 / 4;
+ flt_config.fac_bl1 /= 2;
+ }
+ }
+
+ params->module_en_update |= RKISP1_CIF_ISP_MODULE_FLT;
+ params->module_ens |= RKISP1_CIF_ISP_MODULE_FLT;
+ params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_FLT;
+}
+
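To make the table selection above concrete: with the hypothetical request sketched earlier (Sharpness 3 and NoiseReductionModeHighQuality, i.e. denoise level 3), prepare() programs fac_sh0 = 0x0c, fac_sh1 = 0x10, thresh_sh0 = 36, thresh_sh1 = 51, grn_stage1 = 4 and chr_v_mode = chr_h_mode = 3, and neither the denoise == 9/10 nor the denoise > 7 adjustments apply.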
+REGISTER_IPA_ALGORITHM(Filter, "Filter")
+
+} /* namespace ipa::rkisp1::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/rkisp1/algorithms/filter.h b/src/ipa/rkisp1/algorithms/filter.h
new file mode 100644
index 00000000..d595811d
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/filter.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021-2022, Ideas On Board
+ *
+ * RkISP1 Filter control
+ */
+
+#pragma once
+
+#include <sys/types.h>
+
+#include "algorithm.h"
+
+namespace libcamera {
+
+namespace ipa::rkisp1::algorithms {
+
+class Filter : public Algorithm
+{
+public:
+ Filter() = default;
+ ~Filter() = default;
+
+ void queueRequest(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const ControlList &controls) override;
+ void prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ rkisp1_params_cfg *params) override;
+};
+
+} /* namespace ipa::rkisp1::algorithms */
+} /* namespace libcamera */
diff --git a/src/ipa/rkisp1/algorithms/gsl.cpp b/src/ipa/rkisp1/algorithms/gsl.cpp
new file mode 100644
index 00000000..9b056c6e
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/gsl.cpp
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021-2022, Ideas On Board
+ *
+ * RkISP1 Gamma Sensor Linearization control
+ */
+
+#include "gsl.h"
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+#include "linux/rkisp1-config.h"
+
+/**
+ * \file gsl.h
+ */
+
+namespace libcamera {
+
+namespace ipa::rkisp1::algorithms {
+
+/**
+ * \class GammaSensorLinearization
+ * \brief RkISP1 Gamma Sensor Linearization control
+ *
+ * This algorithm linearizes the sensor output to compensate for the sensor's
+ * non-linearities by applying piecewise linear functions to the red, green and
+ * blue channels.
+ *
+ * The curves are specified in the tuning data and defined using 17 points.
+ *
+ * - The X coordinates are expressed using 16 intervals, with the first point
+ * at X coordinate 0. Each interval is expressed as a 3-bit value DX (from
+ * GAMMA_DX_1 to GAMMA_DX_16), stored in the RKISP1_CIF_ISP_GAMMA_DX_LO and
+ * RKISP1_CIF_ISP_GAMMA_DX_HI registers. The real interval is equal to
+ * \f$2^{dx+4}\f$. X coordinates are shared between the red, green and blue
+ * curves.
+ *
+ * - The Y coordinates are specified as 17 values separately for the
+ * red, green and blue channels, with a 12-bit resolution. Each value must be
+ * within [-2048, 2047] of the previous value.
+ */
+
+LOG_DEFINE_CATEGORY(RkISP1Gsl)
+
+static constexpr unsigned int kDegammaXIntervals = 16;
+
+GammaSensorLinearization::GammaSensorLinearization()
+{
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::init
+ */
+int GammaSensorLinearization::init([[maybe_unused]] IPAContext &context,
+ const YamlObject &tuningData)
+{
+ std::vector<uint16_t> xIntervals =
+ tuningData["x-intervals"].getList<uint16_t>().value_or(std::vector<uint16_t>{});
+ if (xIntervals.size() != kDegammaXIntervals) {
+ LOG(RkISP1Gsl, Error)
+ << "Invalid 'x' coordinates: expected "
+ << kDegammaXIntervals << " elements, got "
+ << xIntervals.size();
+
+ return -EINVAL;
+ }
+
+ /* Compute gammaDx_ intervals from xIntervals values */
+ gammaDx_[0] = 0;
+ gammaDx_[1] = 0;
+ for (unsigned int i = 0; i < kDegammaXIntervals; ++i)
+ gammaDx_[i / 8] |= (xIntervals[i] & 0x07) << ((i % 8) * 4);
+
+ const YamlObject &yObject = tuningData["y"];
+ if (!yObject.isDictionary()) {
+ LOG(RkISP1Gsl, Error)
+ << "Issue while parsing 'y' in tuning file: "
+ << "entry must be a dictionary";
+ return -EINVAL;
+ }
+
+ curveYr_ = yObject["red"].getList<uint16_t>().value_or(std::vector<uint16_t>{});
+ if (curveYr_.size() != RKISP1_CIF_ISP_DEGAMMA_CURVE_SIZE) {
+ LOG(RkISP1Gsl, Error)
+ << "Invalid 'y:red' coordinates: expected "
+ << RKISP1_CIF_ISP_DEGAMMA_CURVE_SIZE
+ << " elements, got " << curveYr_.size();
+ return -EINVAL;
+ }
+
+ curveYg_ = yObject["green"].getList<uint16_t>().value_or(std::vector<uint16_t>{});
+ if (curveYg_.size() != RKISP1_CIF_ISP_DEGAMMA_CURVE_SIZE) {
+ LOG(RkISP1Gsl, Error)
+ << "Invalid 'y:green' coordinates: expected "
+ << RKISP1_CIF_ISP_DEGAMMA_CURVE_SIZE
+ << " elements, got " << curveYg_.size();
+ return -EINVAL;
+ }
+
+ curveYb_ = yObject["blue"].getList<uint16_t>().value_or(std::vector<uint16_t>{});
+ if (curveYb_.size() != RKISP1_CIF_ISP_DEGAMMA_CURVE_SIZE) {
+ LOG(RkISP1Gsl, Error)
+ << "Invalid 'y:blue' coordinates: expected "
+ << RKISP1_CIF_ISP_DEGAMMA_CURVE_SIZE
+ << " elements, got " << curveYb_.size();
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
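To make the DX packing in init() concrete, consider a hypothetical tuning file whose sixteen x-intervals are all 4. Each 3-bit DX value lands in its own nibble of the two 32-bit gammaDx_ words:

    x-intervals: [ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 ]   # placeholder values
    gammaDx_[0] = gammaDx_[1] = 0x44444444
    interval    = 2^(4+4) = 256 codes
    full range  = 16 * 256 = 4096 codes, i.e. a 12-bit input range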
+/**
+ * \copydoc libcamera::ipa::Algorithm::prepare
+ */
+void GammaSensorLinearization::prepare([[maybe_unused]] IPAContext &context,
+ const uint32_t frame,
+ [[maybe_unused]] IPAFrameContext &frameContext,
+ rkisp1_params_cfg *params)
+{
+ if (frame > 0)
+ return;
+
+ params->others.sdg_config.xa_pnts.gamma_dx0 = gammaDx_[0];
+ params->others.sdg_config.xa_pnts.gamma_dx1 = gammaDx_[1];
+
+ std::copy(curveYr_.begin(), curveYr_.end(),
+ params->others.sdg_config.curve_r.gamma_y);
+ std::copy(curveYg_.begin(), curveYg_.end(),
+ params->others.sdg_config.curve_g.gamma_y);
+ std::copy(curveYb_.begin(), curveYb_.end(),
+ params->others.sdg_config.curve_b.gamma_y);
+
+ params->module_en_update |= RKISP1_CIF_ISP_MODULE_SDG;
+ params->module_ens |= RKISP1_CIF_ISP_MODULE_SDG;
+ params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_SDG;
+}
+
+REGISTER_IPA_ALGORITHM(GammaSensorLinearization, "GammaSensorLinearization")
+
+} /* namespace ipa::rkisp1::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/rkisp1/algorithms/gsl.h b/src/ipa/rkisp1/algorithms/gsl.h
new file mode 100644
index 00000000..c404105e
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/gsl.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021-2022, Ideas On Board
+ *
+ * RkISP1 Gamma Sensor Linearization control
+ */
+
+#pragma once
+
+#include "algorithm.h"
+
+namespace libcamera {
+
+namespace ipa::rkisp1::algorithms {
+
+class GammaSensorLinearization : public Algorithm
+{
+public:
+ GammaSensorLinearization();
+ ~GammaSensorLinearization() = default;
+
+ int init(IPAContext &context, const YamlObject &tuningData) override;
+ void prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ rkisp1_params_cfg *params) override;
+
+private:
+ uint32_t gammaDx_[2];
+ std::vector<uint16_t> curveYr_;
+ std::vector<uint16_t> curveYg_;
+ std::vector<uint16_t> curveYb_;
+};
+
+} /* namespace ipa::rkisp1::algorithms */
+} /* namespace libcamera */
diff --git a/src/ipa/rkisp1/algorithms/lsc.cpp b/src/ipa/rkisp1/algorithms/lsc.cpp
new file mode 100644
index 00000000..161183fc
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/lsc.cpp
@@ -0,0 +1,342 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021-2022, Ideas On Board
+ *
+ * RkISP1 Lens Shading Correction control
+ */
+
+#include "lsc.h"
+
+#include <algorithm>
+#include <cmath>
+#include <numeric>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+#include "linux/rkisp1-config.h"
+
+/**
+ * \file lsc.h
+ */
+
+namespace libcamera {
+
+namespace ipa::rkisp1::algorithms {
+
+/**
+ * \class LensShadingCorrection
+ * \brief RkISP1 Lens Shading Correction control
+ *
+ * Due to the optical characteristics of the lens, the light intensity received
+ * by the sensor is not uniform.
+ *
+ * The Lens Shading Correction algorithm applies multipliers to all pixels
+ * to compensate for the lens shading effect. The coefficients are
+ * specified in a downscaled table in the YAML tuning file.
+ */
+
+LOG_DEFINE_CATEGORY(RkISP1Lsc)
+
+static std::vector<double> parseSizes(const YamlObject &tuningData,
+ const char *prop)
+{
+ std::vector<double> sizes =
+ tuningData[prop].getList<double>().value_or(std::vector<double>{});
+ if (sizes.size() != RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE) {
+ LOG(RkISP1Lsc, Error)
+ << "Invalid '" << prop << "' values: expected "
+ << RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE
+ << " elements, got " << sizes.size();
+ return {};
+ }
+
+ /*
+ * The sum of all elements must be 0.5 to satisfy hardware constraints.
+ * Validate it here, allowing a 1% tolerance as rounding errors may
+ * prevent an exact match (further adjustments will be performed in
+ * LensShadingCorrection::configure()).
+ */
+ double sum = std::accumulate(sizes.begin(), sizes.end(), 0.0);
+ if (sum < 0.495 || sum > 0.505) {
+ LOG(RkISP1Lsc, Error)
+ << "Invalid '" << prop << "' values: sum of the elements"
+ << " should be 0.5, got " << sum;
+ return {};
+ }
+
+ return sizes;
+}
+
+static std::vector<uint16_t> parseTable(const YamlObject &tuningData,
+ const char *prop)
+{
+ static constexpr unsigned int kLscNumSamples =
+ RKISP1_CIF_ISP_LSC_SAMPLES_MAX * RKISP1_CIF_ISP_LSC_SAMPLES_MAX;
+
+ std::vector<uint16_t> table =
+ tuningData[prop].getList<uint16_t>().value_or(std::vector<uint16_t>{});
+ if (table.size() != kLscNumSamples) {
+ LOG(RkISP1Lsc, Error)
+ << "Invalid '" << prop << "' values: expected "
+ << kLscNumSamples
+ << " elements, got " << table.size();
+ return {};
+ }
+
+ return table;
+}
+
+LensShadingCorrection::LensShadingCorrection()
+ : lastCt_({ 0, 0 })
+{
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::init
+ */
+int LensShadingCorrection::init([[maybe_unused]] IPAContext &context,
+ const YamlObject &tuningData)
+{
+ xSize_ = parseSizes(tuningData, "x-size");
+ ySize_ = parseSizes(tuningData, "y-size");
+
+ if (xSize_.empty() || ySize_.empty())
+ return -EINVAL;
+
+ /* Get all defined sets to apply. */
+ const YamlObject &yamlSets = tuningData["sets"];
+ if (!yamlSets.isList()) {
+ LOG(RkISP1Lsc, Error)
+ << "'sets' parameter not found in tuning file";
+ return -EINVAL;
+ }
+
+ const auto &sets = yamlSets.asList();
+ for (const auto &yamlSet : sets) {
+ uint32_t ct = yamlSet["ct"].get<uint32_t>(0);
+
+ if (sets_.count(ct)) {
+ LOG(RkISP1Lsc, Error)
+ << "Multiple sets found for color temperature "
+ << ct;
+ return -EINVAL;
+ }
+
+ Components &set = sets_[ct];
+
+ set.ct = ct;
+ set.r = parseTable(yamlSet, "r");
+ set.gr = parseTable(yamlSet, "gr");
+ set.gb = parseTable(yamlSet, "gb");
+ set.b = parseTable(yamlSet, "b");
+
+ if (set.r.empty() || set.gr.empty() ||
+ set.gb.empty() || set.b.empty()) {
+ LOG(RkISP1Lsc, Error)
+ << "Set for color temperature " << ct
+ << " is missing tables";
+ return -EINVAL;
+ }
+ }
+
+ if (sets_.empty()) {
+ LOG(RkISP1Lsc, Error) << "Failed to load any sets";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::configure
+ */
+int LensShadingCorrection::configure(IPAContext &context,
+ [[maybe_unused]] const IPACameraSensorInfo &configInfo)
+{
+ const Size &size = context.configuration.sensor.size;
+ Size totalSize{};
+
+ for (unsigned int i = 0; i < RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE; ++i) {
+ xSizes_[i] = xSize_[i] * size.width;
+ ySizes_[i] = ySize_[i] * size.height;
+
+ /*
+ * To prevent unexpected behavior of the ISP, the sums of the x_size_tbl and
+ * y_size_tbl entries shall be equal to size.width/2 and size.height/2
+ * respectively. Enforce this by computing the last table entries explicitly,
+ * to avoid rounding-induced errors.
+ */
+ if (i == RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE - 1) {
+ xSizes_[i] = size.width / 2 - totalSize.width;
+ ySizes_[i] = size.height / 2 - totalSize.height;
+ }
+
+ totalSize.width += xSizes_[i];
+ totalSize.height += ySizes_[i];
+
+ xGrad_[i] = std::round(32768 / xSizes_[i]);
+ yGrad_[i] = std::round(32768 / ySizes_[i]);
+ }
+
+ context.configuration.lsc.enabled = true;
+ return 0;
+}
+
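As a worked example of the sector computation in configure(), assume (hypothetically) a 3280x2464 sensor mode, as on the IMX219, together with the flat 8 x 0.0625 x-size table used by the tuning files below:

    xSizes_[i] = 0.0625 * 3280       = 205 pixels per sector
    sum        = 8 * 205 = 1640      = 3280 / 2, so the last entry needs no correction
    xGrad_[i]  = round(32768 / 205)  = 160

When the fractional sizes do not divide the sensor dimensions exactly, the last entries absorb the rounding error so that the sums stay at exactly width/2 and height/2.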
+void LensShadingCorrection::setParameters(rkisp1_params_cfg *params)
+{
+ struct rkisp1_cif_isp_lsc_config &config = params->others.lsc_config;
+
+ memcpy(config.x_grad_tbl, xGrad_, sizeof(config.x_grad_tbl));
+ memcpy(config.y_grad_tbl, yGrad_, sizeof(config.y_grad_tbl));
+ memcpy(config.x_size_tbl, xSizes_, sizeof(config.x_size_tbl));
+ memcpy(config.y_size_tbl, ySizes_, sizeof(config.y_size_tbl));
+
+ params->module_en_update |= RKISP1_CIF_ISP_MODULE_LSC;
+ params->module_ens |= RKISP1_CIF_ISP_MODULE_LSC;
+ params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_LSC;
+}
+
+void LensShadingCorrection::copyTable(rkisp1_cif_isp_lsc_config &config,
+ const Components &set)
+{
+ std::copy(set.r.begin(), set.r.end(), &config.r_data_tbl[0][0]);
+ std::copy(set.gr.begin(), set.gr.end(), &config.gr_data_tbl[0][0]);
+ std::copy(set.gb.begin(), set.gb.end(), &config.gb_data_tbl[0][0]);
+ std::copy(set.b.begin(), set.b.end(), &config.b_data_tbl[0][0]);
+}
+
+/*
+ * Interpolate LSC parameters based on color temperature value.
+ */
+void LensShadingCorrection::interpolateTable(rkisp1_cif_isp_lsc_config &config,
+ const Components &set0,
+ const Components &set1,
+ const uint32_t ct)
+{
+ double coeff0 = (set1.ct - ct) / static_cast<double>(set1.ct - set0.ct);
+ double coeff1 = (ct - set0.ct) / static_cast<double>(set1.ct - set0.ct);
+
+ for (unsigned int i = 0; i < RKISP1_CIF_ISP_LSC_SAMPLES_MAX; ++i) {
+ for (unsigned int j = 0; j < RKISP1_CIF_ISP_LSC_SAMPLES_MAX; ++j) {
+ unsigned int sample = i * RKISP1_CIF_ISP_LSC_SAMPLES_MAX + j;
+
+ config.r_data_tbl[i][j] =
+ set0.r[sample] * coeff0 +
+ set1.r[sample] * coeff1;
+
+ config.gr_data_tbl[i][j] =
+ set0.gr[sample] * coeff0 +
+ set1.gr[sample] * coeff1;
+
+ config.gb_data_tbl[i][j] =
+ set0.gb[sample] * coeff0 +
+ set1.gb[sample] * coeff1;
+
+ config.b_data_tbl[i][j] =
+ set0.b[sample] * coeff0 +
+ set1.b[sample] * coeff1;
+ }
+ }
+}
+
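As a worked example of the interpolation weights, take (hypothetically) an estimated color temperature of 4000 K with the 2856 K and 5003 K sets from the imx258 tuning data added below:

    coeff0 = (5003 - 4000) / (5003 - 2856) = 1003 / 2147 ≈ 0.467   (weight of the 2856 K tables)
    coeff1 = (4000 - 2856) / (5003 - 2856) = 1144 / 2147 ≈ 0.533   (weight of the 5003 K tables)

Each output cell is coeff0 * set0 + coeff1 * set1. With the 10% threshold used in prepare() below (0.1 * 2147 ≈ 215 K), a color temperature closer than that to either set simply reuses that set's tables instead of interpolating.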
+/**
+ * \copydoc libcamera::ipa::Algorithm::prepare
+ */
+void LensShadingCorrection::prepare(IPAContext &context,
+ const uint32_t frame,
+ [[maybe_unused]] IPAFrameContext &frameContext,
+ rkisp1_params_cfg *params)
+{
+ struct rkisp1_cif_isp_lsc_config &config = params->others.lsc_config;
+
+ /*
+ * If there is only one set, the configuration has already been done
+ * for first frame.
+ */
+ if (sets_.size() == 1 && frame > 0)
+ return;
+
+ /*
+ * If there is only one set, pick it. We can ignore lastCt_, as it will
+ * never be relevant.
+ */
+ if (sets_.size() == 1) {
+ setParameters(params);
+ copyTable(config, sets_.cbegin()->second);
+ return;
+ }
+
+ uint32_t ct = context.activeState.awb.temperatureK;
+ ct = std::clamp(ct, sets_.cbegin()->first, sets_.crbegin()->first);
+
+ /*
+ * If the original is the same, then it means the same adjustment would
+ * be made. If the adjusted is the same, then it means that it's the
+ * same as what was actually applied. Thus in these cases we can skip
+ * reprogramming the LSC.
+ *
+ * original == adjusted can only happen if an interpolation
+ * happened, or if original has an exact entry in sets_. This means
+ * that if original != adjusted, then original was adjusted to
+ * the nearest available entry in sets_, resulting in adjusted.
+ * Clearly, any ct value that is in between original and adjusted
+ * will be adjusted to the same adjusted value, so we can skip
+ * reprogramming the LSC table.
+ *
+ * We also skip updating the original value, as the last one had a
+ * larger bound and thus a larger range of ct values that will be
+ * adjusted to the same adjusted value.
+ */
+ if ((lastCt_.original <= ct && ct <= lastCt_.adjusted) ||
+ (lastCt_.adjusted <= ct && ct <= lastCt_.original))
+ return;
+
+ setParameters(params);
+
+ /*
+ * The color temperature matches exactly one of the available LSC tables.
+ */
+ if (sets_.count(ct)) {
+ copyTable(config, sets_[ct]);
+ lastCt_ = { ct, ct };
+ return;
+ }
+
+ /* No shortcuts left; we need to round or interpolate */
+ auto iter = sets_.upper_bound(ct);
+ const Components &set1 = iter->second;
+ const Components &set0 = (--iter)->second;
+ uint32_t ct0 = set0.ct;
+ uint32_t ct1 = set1.ct;
+ uint32_t diff0 = ct - ct0;
+ uint32_t diff1 = ct1 - ct;
+ static constexpr double kThreshold = 0.1;
+ float threshold = kThreshold * (ct1 - ct0);
+
+ if (diff0 < threshold || diff1 < threshold) {
+ const Components &set = diff0 < diff1 ? set0 : set1;
+ LOG(RkISP1Lsc, Debug) << "using LSC table for " << set.ct;
+ copyTable(config, set);
+ lastCt_ = { ct, set.ct };
+ return;
+ }
+
+ /*
+ * ct is not within 10% of the difference between the neighbouring
+ * color temperatures, so we need to interpolate.
+ */
+ LOG(RkISP1Lsc, Debug)
+ << "ct is " << ct << ", interpolating between "
+ << ct0 << " and " << ct1;
+ interpolateTable(config, set0, set1, ct);
+ lastCt_ = { ct, ct };
+}
+
+REGISTER_IPA_ALGORITHM(LensShadingCorrection, "LensShadingCorrection")
+
+} /* namespace ipa::rkisp1::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/rkisp1/algorithms/lsc.h b/src/ipa/rkisp1/algorithms/lsc.h
new file mode 100644
index 00000000..5baf5927
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/lsc.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021-2022, Ideas On Board
+ *
+ * RkISP1 Lens Shading Correction control
+ */
+
+#pragma once
+
+#include <map>
+
+#include "algorithm.h"
+
+namespace libcamera {
+
+namespace ipa::rkisp1::algorithms {
+
+class LensShadingCorrection : public Algorithm
+{
+public:
+ LensShadingCorrection();
+ ~LensShadingCorrection() = default;
+
+ int init(IPAContext &context, const YamlObject &tuningData) override;
+ int configure(IPAContext &context, const IPACameraSensorInfo &configInfo) override;
+ void prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ rkisp1_params_cfg *params) override;
+
+private:
+ struct Components {
+ uint32_t ct;
+ std::vector<uint16_t> r;
+ std::vector<uint16_t> gr;
+ std::vector<uint16_t> gb;
+ std::vector<uint16_t> b;
+ };
+
+ void setParameters(rkisp1_params_cfg *params);
+ void copyTable(rkisp1_cif_isp_lsc_config &config, const Components &set0);
+ void interpolateTable(rkisp1_cif_isp_lsc_config &config,
+ const Components &set0, const Components &set1,
+ const uint32_t ct);
+
+ std::map<uint32_t, Components> sets_;
+ std::vector<double> xSize_;
+ std::vector<double> ySize_;
+ uint16_t xGrad_[RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE];
+ uint16_t yGrad_[RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE];
+ uint16_t xSizes_[RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE];
+ uint16_t ySizes_[RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE];
+ struct {
+ uint32_t original;
+ uint32_t adjusted;
+ } lastCt_;
+};
+
+} /* namespace ipa::rkisp1::algorithms */
+} /* namespace libcamera */
diff --git a/src/ipa/rkisp1/algorithms/meson.build b/src/ipa/rkisp1/algorithms/meson.build
index 7ec53d89..93a48329 100644
--- a/src/ipa/rkisp1/algorithms/meson.build
+++ b/src/ipa/rkisp1/algorithms/meson.build
@@ -4,4 +4,10 @@ rkisp1_ipa_algorithms = files([
'agc.cpp',
'awb.cpp',
'blc.cpp',
+ 'cproc.cpp',
+ 'dpcc.cpp',
+ 'dpf.cpp',
+ 'filter.cpp',
+ 'gsl.cpp',
+ 'lsc.cpp',
])
diff --git a/src/ipa/rkisp1/data/imx219.yaml b/src/ipa/rkisp1/data/imx219.yaml
index 232d8ae8..cbcc43b8 100644
--- a/src/ipa/rkisp1/data/imx219.yaml
+++ b/src/ipa/rkisp1/data/imx219.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: CC0-1.0
-%YAML 1.2
+%YAML 1.1
---
version: 1
algorithms:
@@ -10,4 +10,109 @@ algorithms:
Gr: 256
Gb: 256
B: 256
+ - LensShadingCorrection:
+ x-size: [ 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625 ]
+ y-size: [ 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625 ]
+ sets:
+ - ct: 5800
+ r: [
+ 1501, 1480, 1478, 1362, 1179, 1056, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1030, 1053, 1134, 1185, 1520, 1480, 1463, 1179, 1056, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1027, 1046, 1134, 1533, 1471, 1179, 1056, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1025, 1039, 1471,
+ 1314, 1068, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1025, 1314, 1150, 1028, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1150, 1050, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1076, 1026,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1052, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1050, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1050, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1050, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1025, 1086, 1037, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1025, 1057, 1182, 1071, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1057, 1161,
+ 1345, 1146, 1027, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1036, 1161, 1298, 1612, 1328, 1089, 1025, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1025, 1036, 1161, 1324, 1463, 1884, 1651, 1339, 1103, 1032,
+ 1025, 1024, 1024, 1024, 1024, 1025, 1038, 1101, 1204, 1324, 1463, 1497, 1933,
+ 1884, 1587, 1275, 1079, 1052, 1046, 1046, 1046, 1046, 1055, 1101, 1204, 1336,
+ 1487, 1493, 1476,
+ ]
+ gr: [
+ 1262, 1250, 1094, 1027, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1250, 1095, 1028, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1095, 1030, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1030,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1025, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1041, 1051, 1025, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1051, 1165, 1088,
+ 1051, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1051, 1165, 1261,
+ ]
+ gb: [
+ 1259, 1248, 1092, 1027, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1248, 1092, 1027, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1092, 1029, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1029,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1025, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1041, 1051, 1025, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1052, 1166, 1090,
+ 1051, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1052, 1166, 1266,
+ ]
+ b: [
+ 1380, 1378, 1377, 1247, 1080, 1025, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1030, 1406, 1378, 1284, 1092, 1027, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1406, 1338, 1129, 1029, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1338,
+ 1205, 1043, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1205, 1094, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1116, 1039, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1070, 1025,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1052, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1052, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1052, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1052, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1070, 1025, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1025, 1109, 1036, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1025, 1057,
+ 1175, 1082, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1057, 1176, 1293, 1172, 1036, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1054, 1185, 1334, 1438, 1294, 1099, 1025, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1054, 1185, 1334, 1334, 1462,
+ 1438, 1226, 1059, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1025, 1054, 1185,
+ 1326, 1334, 1334,
+ ]
...
diff --git a/src/ipa/rkisp1/data/imx258.yaml b/src/ipa/rkisp1/data/imx258.yaml
new file mode 100644
index 00000000..43dddf20
--- /dev/null
+++ b/src/ipa/rkisp1/data/imx258.yaml
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: CC0-1.0
+%YAML 1.1
+---
+version: 1
+algorithms:
+ - Agc:
+ - Awb:
+ - LensShadingCorrection:
+ x-size: [ 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625 ]
+ y-size: [ 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625 ]
+ sets:
+ #4208x3120_A_70 - A
+ - ct: 2856
+ resolution: 4208x3120
+ r: [1483, 1423, 1410, 1414, 1417, 1384, 1356, 1348, 1349, 1348, 1393, 1392, 1409, 1444, 1460, 1475, 1568, 1462, 1409, 1398, 1391, 1361, 1343, 1328, 1312, 1316, 1325, 1328, 1372, 1395, 1427, 1410, 1440, 1525, 1441, 1366, 1373, 1364, 1338, 1312, 1287, 1270, 1262, 1267, 1305, 1339, 1380, 1402, 1425, 1424, 1510, 1423, 1376, 1375, 1353, 1309, 1253, 1220, 1201, 1192, 1203, 1243, 1286, 1338, 1375, 1427, 1438, 1499, 1405, 1353, 1354, 1331, 1269, 1207, 1169, 1140, 1137, 1145, 1186, 1246, 1309, 1373, 1399, 1438, 1512, 1391, 1349, 1351, 1306, 1236, 1174, 1121, 1089, 1083, 1098, 1139, 1202, 1276, 1349, 1384, 1428, 1494, 1401, 1337, 1336, 1277, 1211, 1138, 1082, 1057, 1053, 1067, 1110, 1166, 1253, 1331, 1375, 1417, 1485, 1401, 1341, 1316, 1269, 1184, 1115, 1063, 1037, 1029, 1042, 1082, 1144, 1234, 1322, 1368, 1405, 1480, 1387, 1329, 1305, 1257, 1179, 1104, 1049, 1028, 1024, 1037, 1078, 1144, 1231, 1312, 1363, 1404, 1456, 1401, 1341, 1313, 1254, 1177, 1104, 1053, 1041, 1026, 1042, 1082, 1149, 1229, 1322, 1372, 1397, 1457, 1397, 1344, 1312, 1271, 1191, 1122, 1070, 1052, 1044, 1061, 1097, 1166, 1245, 1334, 1382, 1405, 1476, 1400, 1342, 1333, 1293, 1213, 1146, 1099, 1073, 1061, 1081, 1134, 1202, 1273, 1332, 1380, 1411, 1484, 1414, 1350, 1344, 1301, 1251, 1181, 1133, 1109, 1100, 1118, 1164, 1218, 1299, 1338, 1373, 1408, 1459, 1397, 1360, 1342, 1339, 1293, 1231, 1181, 1149, 1155, 1161, 1202, 1256, 1315, 1364, 1383, 1396, 1479, 1382, 1342, 1358, 1346, 1314, 1284, 1231, 1210, 1198, 1224, 1251, 1303, 1338, 1361, 1381, 1394, 1455, 1386, 1338, 1342, 1341, 1326, 1296, 1274, 1254, 1249, 1262, 1280, 1319, 1357, 1367, 1373, 1379, 1462, 1426, 1340, 1356, 1354, 1330, 1344, 1291, 1275, 1255, 1272, 1298, 1333, 1374, 1390, 1393, 1418, 1580, ]
+ gr: [1274, 1203, 1200, 1184, 1165, 1167, 1155, 1160, 1155, 1158, 1164, 1181, 1196, 1223, 1219, 1220, 1369, 1233, 1172, 1161, 1158, 1146, 1149, 1142, 1129, 1133, 1137, 1144, 1155, 1173, 1189, 1204, 1205, 1268, 1215, 1172, 1148, 1137, 1135, 1124, 1123, 1114, 1110, 1116, 1131, 1149, 1161, 1175, 1191, 1220, 1263, 1185, 1153, 1140, 1137, 1119, 1106, 1094, 1088, 1086, 1099, 1107, 1125, 1152, 1154, 1187, 1209, 1255, 1195, 1141, 1133, 1133, 1112, 1083, 1081, 1066, 1057, 1067, 1088, 1103, 1134, 1154, 1172, 1199, 1255, 1186, 1136, 1127, 1121, 1094, 1077, 1055, 1044, 1040, 1048, 1067, 1086, 1121, 1146, 1155, 1185, 1258, 1177, 1127, 1117, 1104, 1082, 1063, 1044, 1038, 1027, 1036, 1057, 1070, 1101, 1138, 1151, 1177, 1245, 1184, 1116, 1119, 1098, 1070, 1045, 1037, 1030, 1027, 1026, 1045, 1062, 1099, 1132, 1149, 1179, 1238, 1172, 1120, 1113, 1100, 1070, 1042, 1029, 1027, 1029, 1027, 1042, 1066, 1088, 1126, 1149, 1174, 1223, 1162, 1118, 1117, 1093, 1065, 1039, 1030, 1028, 1022, 1028, 1045, 1060, 1101, 1134, 1146, 1165, 1246, 1172, 1116, 1119, 1102, 1075, 1046, 1029, 1032, 1030, 1038, 1049, 1073, 1097, 1132, 1146, 1168, 1231, 1178, 1118, 1123, 1111, 1083, 1062, 1041, 1038, 1033, 1041, 1054, 1074, 1109, 1135, 1144, 1175, 1244, 1193, 1136, 1123, 1118, 1100, 1070, 1045, 1036, 1044, 1047, 1067, 1090, 1116, 1135, 1158, 1174, 1232, 1198, 1142, 1127, 1130, 1107, 1085, 1068, 1060, 1057, 1069, 1079, 1102, 1115, 1124, 1154, 1178, 1241, 1192, 1136, 1125, 1113, 1116, 1096, 1081, 1075, 1075, 1088, 1097, 1116, 1124, 1135, 1155, 1177, 1232, 1183, 1142, 1119, 1113, 1099, 1101, 1088, 1084, 1085, 1089, 1103, 1109, 1122, 1133, 1147, 1175, 1258, 1238, 1162, 1161, 1143, 1124, 1131, 1108, 1111, 1107, 1115, 1116, 1138, 1137, 1150, 1163, 1186, 1381, ]
+ gb: [1277, 1217, 1179, 1179, 1163, 1158, 1151, 1150, 1149, 1143, 1151, 1172, 1184, 1207, 1216, 1246, 1375, 1242, 1194, 1166, 1151, 1144, 1145, 1135, 1130, 1129, 1132, 1137, 1154, 1166, 1189, 1207, 1210, 1290, 1229, 1177, 1153, 1144, 1140, 1135, 1124, 1110, 1104, 1115, 1126, 1148, 1162, 1171, 1199, 1220, 1268, 1226, 1163, 1152, 1138, 1130, 1111, 1091, 1088, 1086, 1089, 1097, 1126, 1147, 1164, 1187, 1206, 1273, 1212, 1151, 1141, 1132, 1117, 1093, 1075, 1060, 1059, 1062, 1088, 1108, 1133, 1162, 1168, 1204, 1278, 1207, 1141, 1130, 1126, 1095, 1075, 1063, 1046, 1044, 1054, 1069, 1084, 1120, 1153, 1167, 1195, 1269, 1200, 1141, 1126, 1113, 1092, 1063, 1045, 1033, 1036, 1038, 1055, 1080, 1117, 1139, 1165, 1182, 1262, 1195, 1130, 1128, 1115, 1079, 1052, 1041, 1031, 1024, 1028, 1046, 1072, 1110, 1141, 1160, 1175, 1258, 1189, 1136, 1124, 1105, 1077, 1049, 1029, 1021, 1029, 1033, 1040, 1074, 1108, 1143, 1152, 1173, 1237, 1200, 1130, 1126, 1109, 1080, 1050, 1030, 1031, 1027, 1031, 1043, 1069, 1099, 1141, 1152, 1168, 1249, 1203, 1132, 1124, 1113, 1082, 1058, 1032, 1030, 1024, 1033, 1050, 1083, 1109, 1151, 1156, 1178, 1253, 1204, 1130, 1128, 1112, 1088, 1060, 1045, 1030, 1027, 1036, 1058, 1082, 1120, 1145, 1160, 1176, 1246, 1195, 1137, 1123, 1121, 1102, 1072, 1046, 1037, 1037, 1047, 1072, 1090, 1125, 1140, 1158, 1177, 1252, 1209, 1147, 1128, 1125, 1114, 1088, 1063, 1053, 1051, 1058, 1084, 1101, 1128, 1140, 1159, 1176, 1243, 1195, 1138, 1130, 1127, 1113, 1101, 1076, 1071, 1067, 1082, 1087, 1111, 1125, 1140, 1151, 1183, 1235, 1189, 1137, 1126, 1122, 1112, 1104, 1091, 1089, 1081, 1085, 1103, 1112, 1125, 1140, 1157, 1175, 1242, 1234, 1181, 1161, 1150, 1127, 1117, 1101, 1094, 1094, 1102, 1117, 1130, 1138, 1155, 1171, 1192, 1399, ]
+ b: [1309, 1209, 1169, 1157, 1149, 1136, 1116, 1117, 1126, 1128, 1127, 1141, 1143, 1182, 1196, 1209, 1398, 1231, 1176, 1140, 1123, 1119, 1113, 1111, 1122, 1105, 1117, 1116, 1135, 1130, 1135, 1171, 1169, 1271, 1251, 1154, 1132, 1118, 1104, 1109, 1103, 1094, 1088, 1104, 1093, 1120, 1130, 1135, 1151, 1180, 1267, 1219, 1136, 1111, 1125, 1106, 1107, 1082, 1074, 1077, 1074, 1101, 1112, 1117, 1136, 1139, 1173, 1256, 1205, 1125, 1108, 1118, 1110, 1091, 1081, 1065, 1068, 1065, 1086, 1087, 1105, 1123, 1119, 1156, 1249, 1195, 1106, 1112, 1101, 1085, 1068, 1064, 1053, 1043, 1048, 1068, 1073, 1095, 1117, 1118, 1123, 1251, 1193, 1101, 1091, 1097, 1081, 1052, 1043, 1045, 1041, 1045, 1052, 1065, 1100, 1112, 1112, 1123, 1200, 1180, 1096, 1103, 1083, 1069, 1053, 1045, 1035, 1034, 1035, 1045, 1062, 1087, 1108, 1113, 1113, 1228, 1176, 1093, 1095, 1080, 1062, 1055, 1035, 1033, 1028, 1037, 1039, 1064, 1080, 1115, 1121, 1120, 1202, 1174, 1086, 1087, 1078, 1064, 1049, 1037, 1027, 1022, 1031, 1045, 1058, 1087, 1113, 1108, 1113, 1207, 1200, 1095, 1102, 1092, 1072, 1052, 1043, 1033, 1024, 1033, 1043, 1069, 1095, 1112, 1128, 1123, 1220, 1215, 1101, 1091, 1096, 1080, 1059, 1051, 1040, 1031, 1040, 1064, 1064, 1095, 1111, 1112, 1141, 1222, 1198, 1119, 1108, 1097, 1080, 1059, 1050, 1043, 1034, 1043, 1063, 1073, 1100, 1107, 1114, 1131, 1212, 1197, 1136, 1094, 1109, 1096, 1078, 1054, 1052, 1051, 1060, 1063, 1078, 1101, 1109, 1116, 1142, 1256, 1212, 1112, 1098, 1097, 1094, 1084, 1074, 1061, 1051, 1057, 1064, 1080, 1089, 1102, 1115, 1136, 1227, 1185, 1118, 1081, 1059, 1072, 1068, 1057, 1049, 1048, 1054, 1066, 1058, 1067, 1096, 1109, 1143, 1223, 1291, 1173, 1131, 1113, 1087, 1077, 1090, 1081, 1090, 1086, 1090, 1092, 1103, 1144, 1149, 1216, 1387, ]
+ #4208x3120_D50_70 - D50
+ - ct: 5003
+ resolution: 4208x3120
+ r: [1240, 1212, 1218, 1191, 1191, 1171, 1136, 1144, 1113, 1148, 1182, 1166, 1210, 1211, 1213, 1240, 1336, 1236, 1193, 1176, 1158, 1147, 1126, 1107, 1122, 1107, 1107, 1110, 1146, 1176, 1194, 1195, 1219, 1259, 1210, 1157, 1156, 1153, 1123, 1115, 1094, 1074, 1078, 1081, 1098, 1130, 1163, 1170, 1179, 1220, 1284, 1228, 1146, 1159, 1132, 1101, 1074, 1059, 1053, 1044, 1060, 1072, 1102, 1131, 1156, 1186, 1227, 1272, 1219, 1176, 1150, 1124, 1091, 1043, 1036, 1025, 1025, 1031, 1042, 1076, 1095, 1155, 1188, 1209, 1296, 1206, 1161, 1128, 1101, 1065, 1032, 1019, 1018, 1027, 1018, 1034, 1057, 1102, 1139, 1161, 1211, 1274, 1184, 1133, 1119, 1097, 1042, 1018, 1020, 1027, 1034, 1030, 1032, 1042, 1075, 1119, 1164, 1199, 1270, 1205, 1124, 1114, 1086, 1033, 1015, 1023, 1039, 1039, 1033, 1026, 1041, 1074, 1111, 1142, 1206, 1278, 1193, 1118, 1098, 1084, 1023, 1003, 1016, 1047, 1059, 1038, 1025, 1046, 1063, 1124, 1148, 1190, 1238, 1191, 1124, 1107, 1069, 1027, 1009, 1012, 1036, 1045, 1036, 1020, 1024, 1058, 1118, 1158, 1183, 1262, 1213, 1121, 1112, 1076, 1030, 1012, 1003, 1019, 1028, 1013, 1020, 1036, 1078, 1123, 1155, 1176, 1228, 1221, 1135, 1117, 1105, 1055, 1020, 1005, 1007, 1007, 1004, 1017, 1048, 1088, 1131, 1169, 1183, 1280, 1209, 1141, 1125, 1105, 1074, 1025, 1012, 1008, 1000, 1011, 1024, 1050, 1113, 1128, 1154, 1199, 1290, 1217, 1142, 1134, 1120, 1101, 1054, 1028, 1014, 1006, 1017, 1040, 1078, 1105, 1136, 1164, 1188, 1250, 1195, 1130, 1148, 1120, 1108, 1083, 1053, 1041, 1032, 1061, 1067, 1097, 1127, 1136, 1152, 1181, 1227, 1166, 1145, 1140, 1141, 1119, 1092, 1075, 1072, 1052, 1065, 1089, 1107, 1147, 1154, 1158, 1183, 1230, 1136, 1147, 1150, 1168, 1139, 1113, 1098, 1055, 1048, 1072, 1079, 1129, 1147, 1173, 1188, 1181, 1283, ]
+ gr: [1246, 1183, 1160, 1143, 1145, 1138, 1113, 1111, 1117, 1116, 1132, 1145, 1167, 1167, 1196, 1197, 1335, 1205, 1152, 1123, 1122, 1123, 1103, 1107, 1102, 1097, 1102, 1099, 1128, 1141, 1157, 1152, 1184, 1242, 1204, 1141, 1112, 1106, 1102, 1093, 1096, 1085, 1076, 1085, 1094, 1107, 1123, 1146, 1162, 1178, 1218, 1169, 1130, 1114, 1100, 1096, 1083, 1072, 1059, 1065, 1070, 1087, 1096, 1116, 1134, 1155, 1174, 1238, 1159, 1126, 1105, 1102, 1083, 1062, 1060, 1049, 1047, 1054, 1063, 1084, 1111, 1131, 1140, 1164, 1243, 1167, 1114, 1105, 1088, 1067, 1047, 1034, 1034, 1028, 1042, 1042, 1059, 1096, 1114, 1135, 1170, 1200, 1156, 1101, 1098, 1089, 1068, 1048, 1027, 1034, 1029, 1032, 1047, 1043, 1088, 1111, 1130, 1160, 1201, 1143, 1100, 1086, 1087, 1051, 1034, 1029, 1028, 1030, 1019, 1033, 1044, 1087, 1109, 1124, 1155, 1211, 1148, 1098, 1088, 1077, 1058, 1037, 1026, 1025, 1034, 1033, 1031, 1054, 1074, 1107, 1134, 1159, 1211, 1150, 1090, 1084, 1074, 1056, 1029, 1020, 1028, 1025, 1027, 1031, 1044, 1080, 1109, 1126, 1152, 1208, 1131, 1101, 1088, 1073, 1048, 1035, 1030, 1026, 1024, 1034, 1038, 1053, 1083, 1104, 1124, 1160, 1206, 1147, 1103, 1082, 1082, 1060, 1035, 1026, 1023, 1018, 1031, 1044, 1058, 1096, 1114, 1128, 1153, 1208, 1170, 1112, 1098, 1088, 1070, 1049, 1027, 1027, 1023, 1031, 1046, 1071, 1085, 1106, 1129, 1150, 1228, 1164, 1111, 1101, 1089, 1078, 1058, 1040, 1030, 1032, 1037, 1060, 1073, 1102, 1097, 1125, 1156, 1223, 1181, 1115, 1097, 1093, 1083, 1072, 1056, 1047, 1041, 1057, 1071, 1079, 1081, 1102, 1124, 1141, 1195, 1170, 1109, 1091, 1089, 1061, 1074, 1049, 1054, 1052, 1057, 1067, 1076, 1097, 1106, 1121, 1141, 1211, 1173, 1129, 1108, 1099, 1093, 1092, 1076, 1063, 1057, 1065, 1090, 1107, 1117, 1140, 1123, 1175, 1343, ]
+ gb: [1238, 1183, 1160, 1160, 1134, 1134, 1124, 1108, 1131, 1127, 1124, 1145, 1172, 1188, 1201, 1217, 1349, 1216, 1160, 1128, 1120, 1117, 1110, 1108, 1105, 1102, 1111, 1114, 1125, 1144, 1160, 1162, 1192, 1260, 1212, 1141, 1127, 1118, 1101, 1104, 1103, 1086, 1077, 1086, 1105, 1116, 1126, 1147, 1167, 1191, 1242, 1191, 1130, 1126, 1103, 1093, 1082, 1074, 1070, 1064, 1064, 1079, 1099, 1113, 1132, 1156, 1185, 1247, 1175, 1117, 1114, 1109, 1081, 1067, 1061, 1047, 1044, 1051, 1066, 1083, 1108, 1134, 1141, 1180, 1248, 1187, 1108, 1106, 1095, 1076, 1052, 1044, 1036, 1034, 1042, 1052, 1070, 1105, 1124, 1140, 1161, 1228, 1171, 1091, 1095, 1088, 1069, 1041, 1035, 1034, 1034, 1037, 1048, 1062, 1090, 1120, 1129, 1165, 1223, 1158, 1108, 1093, 1080, 1052, 1030, 1034, 1027, 1030, 1028, 1034, 1054, 1083, 1112, 1133, 1141, 1208, 1158, 1099, 1091, 1075, 1047, 1031, 1017, 1021, 1035, 1027, 1033, 1054, 1088, 1110, 1120, 1146, 1211, 1171, 1099, 1093, 1079, 1056, 1029, 1021, 1030, 1025, 1031, 1037, 1047, 1077, 1116, 1122, 1132, 1203, 1179, 1093, 1087, 1076, 1053, 1038, 1028, 1024, 1024, 1024, 1040, 1058, 1082, 1108, 1114, 1144, 1198, 1167, 1091, 1091, 1087, 1059, 1047, 1029, 1016, 1021, 1036, 1045, 1066, 1093, 1113, 1116, 1144, 1205, 1159, 1113, 1099, 1091, 1069, 1047, 1029, 1029, 1024, 1037, 1054, 1072, 1088, 1109, 1125, 1150, 1200, 1186, 1114, 1097, 1098, 1087, 1065, 1035, 1033, 1043, 1042, 1054, 1076, 1089, 1111, 1126, 1130, 1214, 1153, 1106, 1100, 1090, 1086, 1082, 1057, 1059, 1053, 1059, 1066, 1077, 1088, 1113, 1117, 1144, 1203, 1147, 1107, 1110, 1090, 1088, 1072, 1070, 1060, 1062, 1058, 1074, 1087, 1096, 1109, 1126, 1150, 1216, 1170, 1145, 1128, 1108, 1088, 1110, 1085, 1070, 1064, 1078, 1077, 1101, 1107, 1136, 1148, 1163, 1345, ]
+ b: [1252, 1185, 1146, 1139, 1147, 1130, 1114, 1111, 1122, 1111, 1121, 1123, 1144, 1150, 1171, 1167, 1303, 1187, 1152, 1125, 1101, 1104, 1096, 1101, 1099, 1093, 1096, 1098, 1103, 1118, 1141, 1160, 1156, 1226, 1222, 1125, 1112, 1118, 1104, 1094, 1083, 1073, 1073, 1094, 1099, 1103, 1114, 1133, 1146, 1174, 1212, 1162, 1123, 1104, 1110, 1100, 1081, 1066, 1065, 1057, 1053, 1072, 1094, 1107, 1117, 1136, 1162, 1226, 1197, 1124, 1088, 1092, 1084, 1066, 1055, 1051, 1044, 1049, 1061, 1081, 1096, 1102, 1134, 1143, 1234, 1171, 1110, 1099, 1075, 1070, 1051, 1052, 1030, 1030, 1035, 1055, 1071, 1092, 1100, 1113, 1128, 1214, 1174, 1099, 1080, 1069, 1054, 1047, 1032, 1031, 1027, 1034, 1042, 1061, 1086, 1091, 1113, 1139, 1222, 1156, 1088, 1089, 1072, 1051, 1036, 1032, 1026, 1030, 1024, 1040, 1047, 1074, 1091, 1109, 1131, 1198, 1158, 1090, 1079, 1071, 1047, 1038, 1031, 1028, 1027, 1028, 1029, 1046, 1068, 1087, 1105, 1122, 1196, 1173, 1098, 1080, 1060, 1040, 1036, 1022, 1019, 1022, 1029, 1029, 1045, 1077, 1094, 1103, 1109, 1189, 1170, 1096, 1070, 1063, 1048, 1033, 1026, 1023, 1016, 1021, 1037, 1053, 1068, 1098, 1107, 1128, 1195, 1166, 1099, 1086, 1066, 1061, 1040, 1022, 1022, 1028, 1027, 1041, 1057, 1086, 1094, 1103, 1124, 1188, 1202, 1113, 1081, 1083, 1071, 1040, 1025, 1024, 1025, 1019, 1055, 1055, 1081, 1099, 1112, 1128, 1202, 1171, 1108, 1083, 1084, 1078, 1051, 1043, 1020, 1037, 1037, 1049, 1072, 1069, 1100, 1107, 1115, 1176, 1180, 1106, 1094, 1077, 1068, 1053, 1050, 1035, 1041, 1038, 1062, 1068, 1068, 1084, 1098, 1125, 1184, 1164, 1104, 1077, 1057, 1064, 1049, 1039, 1041, 1036, 1041, 1042, 1058, 1064, 1087, 1099, 1111, 1173, 1209, 1137, 1099, 1083, 1076, 1072, 1077, 1065, 1066, 1065, 1061, 1081, 1096, 1135, 1126, 1150, 1333, ]
+ #4208x3120_D65_70 - D65
+ - ct: 6504
+ resolution: 4208x3120
+ r: [1359, 1336, 1313, 1273, 1274, 1250, 1250, 1218, 1222, 1223, 1240, 1266, 1308, 1327, 1333, 1336, 1456, 1359, 1286, 1256, 1249, 1235, 1235, 1216, 1219, 1187, 1205, 1216, 1240, 1267, 1277, 1303, 1311, 1420, 1326, 1254, 1250, 1239, 1212, 1207, 1191, 1181, 1176, 1181, 1187, 1226, 1241, 1281, 1295, 1326, 1391, 1304, 1253, 1234, 1234, 1209, 1174, 1156, 1147, 1131, 1139, 1168, 1196, 1227, 1265, 1282, 1293, 1385, 1302, 1242, 1224, 1216, 1171, 1140, 1112, 1098, 1087, 1098, 1124, 1177, 1206, 1245, 1266, 1310, 1389, 1327, 1227, 1231, 1195, 1156, 1116, 1094, 1070, 1067, 1073, 1101, 1151, 1190, 1223, 1251, 1281, 1402, 1285, 1229, 1203, 1184, 1135, 1093, 1063, 1047, 1041, 1050, 1083, 1119, 1176, 1211, 1248, 1288, 1388, 1269, 1210, 1215, 1173, 1118, 1078, 1046, 1028, 1025, 1037, 1059, 1103, 1170, 1213, 1230, 1268, 1355, 1295, 1208, 1203, 1171, 1124, 1070, 1041, 1024, 1027, 1030, 1057, 1094, 1168, 1206, 1252, 1270, 1364, 1293, 1196, 1187, 1156, 1110, 1075, 1039, 1022, 1022, 1028, 1065, 1096, 1166, 1213, 1245, 1273, 1349, 1291, 1213, 1203, 1162, 1131, 1079, 1053, 1038, 1029, 1044, 1080, 1119, 1176, 1225, 1243, 1271, 1354, 1284, 1222, 1202, 1186, 1136, 1097, 1063, 1054, 1041, 1054, 1083, 1131, 1186, 1232, 1256, 1276, 1360, 1290, 1237, 1210, 1207, 1166, 1116, 1076, 1066, 1070, 1080, 1109, 1152, 1188, 1230, 1240, 1293, 1341, 1304, 1231, 1229, 1210, 1177, 1153, 1128, 1097, 1105, 1108, 1140, 1170, 1213, 1224, 1260, 1282, 1357, 1299, 1237, 1218, 1218, 1202, 1171, 1144, 1135, 1131, 1143, 1161, 1189, 1221, 1233, 1261, 1271, 1346, 1262, 1216, 1229, 1218, 1191, 1187, 1162, 1161, 1148, 1153, 1180, 1201, 1220, 1234, 1251, 1250, 1352, 1294, 1234, 1242, 1240, 1246, 1200, 1178, 1172, 1137, 1154, 1187, 1214, 1252, 1251, 1247, 1296, 1456, ]
+ gr: [1240, 1187, 1158, 1152, 1144, 1129, 1130, 1118, 1115, 1113, 1119, 1141, 1156, 1172, 1180, 1199, 1330, 1223, 1153, 1127, 1123, 1115, 1104, 1104, 1095, 1100, 1107, 1110, 1121, 1137, 1156, 1169, 1179, 1261, 1205, 1138, 1122, 1108, 1101, 1104, 1098, 1088, 1083, 1090, 1106, 1119, 1125, 1144, 1163, 1186, 1236, 1170, 1122, 1112, 1101, 1091, 1089, 1076, 1068, 1061, 1072, 1084, 1101, 1118, 1134, 1156, 1179, 1243, 1162, 1120, 1105, 1105, 1088, 1067, 1061, 1050, 1050, 1057, 1070, 1088, 1112, 1127, 1145, 1166, 1232, 1163, 1108, 1111, 1099, 1079, 1054, 1046, 1041, 1030, 1040, 1053, 1074, 1098, 1120, 1140, 1170, 1226, 1158, 1105, 1094, 1099, 1064, 1048, 1034, 1036, 1028, 1029, 1049, 1055, 1089, 1116, 1135, 1166, 1218, 1142, 1107, 1094, 1092, 1061, 1041, 1030, 1024, 1025, 1028, 1036, 1053, 1087, 1110, 1128, 1153, 1223, 1142, 1098, 1092, 1084, 1056, 1036, 1025, 1024, 1027, 1024, 1038, 1055, 1082, 1108, 1132, 1153, 1203, 1155, 1098, 1094, 1080, 1056, 1034, 1023, 1025, 1022, 1025, 1036, 1053, 1078, 1112, 1126, 1144, 1212, 1163, 1096, 1092, 1083, 1059, 1039, 1027, 1023, 1028, 1026, 1044, 1056, 1091, 1114, 1130, 1149, 1204, 1152, 1103, 1090, 1089, 1065, 1045, 1031, 1028, 1025, 1035, 1048, 1064, 1092, 1116, 1131, 1157, 1203, 1162, 1100, 1098, 1093, 1076, 1049, 1033, 1030, 1030, 1040, 1050, 1067, 1094, 1103, 1127, 1154, 1221, 1162, 1112, 1099, 1095, 1079, 1064, 1042, 1033, 1034, 1048, 1061, 1077, 1091, 1108, 1126, 1148, 1213, 1154, 1112, 1106, 1095, 1081, 1065, 1056, 1052, 1050, 1059, 1071, 1082, 1091, 1102, 1129, 1149, 1211, 1157, 1106, 1092, 1081, 1066, 1072, 1064, 1048, 1056, 1061, 1066, 1076, 1091, 1107, 1122, 1145, 1207, 1204, 1127, 1117, 1106, 1098, 1081, 1073, 1068, 1062, 1068, 1081, 1107, 1102, 1127, 1148, 1170, 1353, ]
+ gb: [1240, 1177, 1157, 1143, 1129, 1130, 1118, 1112, 1123, 1123, 1123, 1137, 1159, 1181, 1197, 1206, 1354, 1217, 1153, 1130, 1124, 1109, 1114, 1105, 1108, 1116, 1110, 1114, 1131, 1145, 1145, 1163, 1183, 1249, 1197, 1134, 1124, 1107, 1115, 1104, 1100, 1085, 1091, 1097, 1102, 1110, 1133, 1145, 1155, 1190, 1227, 1191, 1125, 1107, 1105, 1093, 1084, 1072, 1066, 1071, 1072, 1081, 1106, 1124, 1129, 1153, 1178, 1238, 1193, 1108, 1104, 1098, 1085, 1072, 1059, 1052, 1048, 1059, 1075, 1089, 1105, 1126, 1146, 1162, 1233, 1166, 1098, 1099, 1091, 1078, 1053, 1043, 1036, 1035, 1045, 1058, 1070, 1100, 1113, 1128, 1156, 1230, 1173, 1100, 1087, 1087, 1064, 1046, 1037, 1031, 1031, 1034, 1047, 1063, 1092, 1107, 1112, 1153, 1228, 1169, 1089, 1089, 1079, 1057, 1043, 1030, 1030, 1027, 1027, 1035, 1057, 1087, 1111, 1125, 1136, 1218, 1166, 1097, 1087, 1079, 1056, 1035, 1022, 1021, 1027, 1022, 1035, 1053, 1083, 1109, 1118, 1138, 1198, 1151, 1100, 1087, 1077, 1057, 1034, 1023, 1024, 1027, 1025, 1036, 1051, 1083, 1109, 1116, 1129, 1215, 1159, 1096, 1091, 1079, 1053, 1037, 1026, 1021, 1020, 1020, 1039, 1063, 1086, 1113, 1116, 1134, 1214, 1158, 1096, 1091, 1087, 1065, 1043, 1034, 1025, 1020, 1028, 1046, 1059, 1088, 1109, 1119, 1130, 1202, 1168, 1101, 1091, 1084, 1074, 1050, 1029, 1028, 1026, 1035, 1055, 1072, 1099, 1105, 1121, 1138, 1204, 1160, 1104, 1093, 1094, 1079, 1067, 1043, 1036, 1036, 1048, 1057, 1081, 1089, 1107, 1118, 1140, 1222, 1158, 1101, 1096, 1090, 1082, 1076, 1059, 1052, 1053, 1063, 1071, 1086, 1094, 1103, 1119, 1134, 1206, 1150, 1105, 1098, 1093, 1082, 1077, 1067, 1063, 1065, 1069, 1081, 1081, 1088, 1108, 1123, 1138, 1211, 1198, 1133, 1114, 1117, 1097, 1093, 1076, 1073, 1067, 1077, 1076, 1089, 1101, 1119, 1154, 1163, 1346, ]
+ b: [1241, 1188, 1165, 1151, 1131, 1127, 1134, 1115, 1122, 1127, 1131, 1136, 1154, 1165, 1173, 1161, 1319, 1210, 1153, 1138, 1120, 1111, 1114, 1118, 1124, 1108, 1118, 1121, 1123, 1132, 1151, 1161, 1150, 1244, 1224, 1149, 1118, 1108, 1107, 1107, 1103, 1098, 1091, 1103, 1103, 1121, 1124, 1135, 1167, 1177, 1224, 1195, 1130, 1099, 1108, 1101, 1083, 1081, 1078, 1074, 1084, 1086, 1097, 1115, 1128, 1145, 1181, 1211, 1191, 1111, 1109, 1098, 1087, 1081, 1071, 1059, 1053, 1064, 1078, 1091, 1109, 1127, 1139, 1167, 1226, 1192, 1111, 1097, 1098, 1072, 1064, 1050, 1042, 1040, 1046, 1053, 1077, 1099, 1113, 1130, 1152, 1215, 1179, 1106, 1093, 1084, 1070, 1055, 1039, 1037, 1034, 1033, 1046, 1067, 1088, 1112, 1120, 1150, 1220, 1178, 1092, 1097, 1085, 1066, 1049, 1033, 1032, 1026, 1028, 1038, 1058, 1081, 1112, 1120, 1137, 1208, 1170, 1103, 1096, 1082, 1063, 1038, 1035, 1025, 1026, 1027, 1035, 1060, 1075, 1109, 1122, 1133, 1214, 1175, 1095, 1097, 1074, 1061, 1039, 1029, 1028, 1022, 1025, 1033, 1049, 1083, 1107, 1117, 1125, 1212, 1179, 1097, 1091, 1076, 1062, 1045, 1030, 1031, 1027, 1031, 1039, 1055, 1082, 1109, 1114, 1144, 1204, 1178, 1102, 1080, 1087, 1060, 1052, 1027, 1028, 1025, 1028, 1043, 1067, 1093, 1113, 1121, 1123, 1189, 1191, 1117, 1100, 1092, 1079, 1058, 1037, 1037, 1020, 1037, 1058, 1065, 1092, 1101, 1115, 1140, 1194, 1173, 1120, 1096, 1085, 1085, 1065, 1048, 1039, 1036, 1046, 1053, 1076, 1096, 1099, 1114, 1140, 1195, 1180, 1105, 1090, 1079, 1073, 1066, 1056, 1049, 1043, 1057, 1061, 1077, 1081, 1090, 1115, 1131, 1180, 1154, 1095, 1084, 1061, 1055, 1056, 1045, 1043, 1039, 1041, 1051, 1067, 1077, 1092, 1108, 1122, 1197, 1210, 1139, 1117, 1112, 1088, 1097, 1084, 1073, 1074, 1065, 1079, 1091, 1103, 1131, 1144, 1154, 1356, ]
+ #4208x3120_D75_70 - D75
+ - ct: 7504
+ resolution: 4208x3120
+ r: [2718, 2443, 2251, 2101, 1949, 1828, 1725, 1659, 1637, 1656, 1692, 1787, 1913, 2038, 2175, 2358, 2612, 2566, 2301, 2129, 1946, 1798, 1654, 1562, 1501, 1474, 1484, 1541, 1628, 1753, 1900, 2056, 2216, 2458, 2439, 2204, 2002, 1839, 1664, 1534, 1419, 1372, 1340, 1357, 1403, 1489, 1621, 1784, 1950, 2114, 2358, 2344, 2108, 1932, 1723, 1559, 1413, 1321, 1258, 1239, 1246, 1293, 1388, 1512, 1675, 1846, 2036, 2269, 2294, 2047, 1842, 1635, 1464, 1328, 1231, 1178, 1144, 1167, 1208, 1298, 1419, 1582, 1769, 1962, 2198, 2234, 1977, 1769, 1556, 1393, 1262, 1164, 1108, 1086, 1096, 1146, 1232, 1350, 1513, 1700, 1913, 2137, 2206, 1942, 1733, 1515, 1345, 1216, 1120, 1066, 1045, 1060, 1099, 1182, 1316, 1462, 1656, 1868, 2131, 2182, 1922, 1685, 1495, 1315, 1188, 1092, 1045, 1025, 1037, 1080, 1160, 1283, 1442, 1624, 1853, 2102, 2193, 1910, 1702, 1477, 1310, 1179, 1087, 1034, 1024, 1029, 1069, 1163, 1278, 1441, 1624, 1846, 2081, 2191, 1936, 1698, 1495, 1325, 1192, 1100, 1052, 1033, 1042, 1082, 1166, 1291, 1448, 1634, 1852, 2118, 2209, 1957, 1732, 1534, 1357, 1223, 1125, 1078, 1062, 1066, 1113, 1204, 1324, 1486, 1665, 1895, 2127, 2267, 2018, 1789, 1577, 1407, 1280, 1181, 1124, 1105, 1113, 1166, 1252, 1388, 1539, 1724, 1936, 2180, 2319, 2074, 1867, 1659, 1491, 1354, 1248, 1192, 1175, 1191, 1236, 1333, 1441, 1618, 1798, 2005, 2249, 2399, 2148, 1955, 1752, 1578, 1442, 1351, 1293, 1272, 1286, 1334, 1418, 1547, 1709, 1872, 2085, 2297, 2497, 2217, 2069, 1857, 1694, 1560, 1458, 1403, 1384, 1400, 1443, 1537, 1670, 1815, 1991, 2157, 2412, 2594, 2341, 2147, 2004, 1827, 1693, 1600, 1537, 1521, 1524, 1576, 1665, 1788, 1941, 2083, 2257, 2529, 2745, 2483, 2315, 2146, 2006, 1868, 1779, 1701, 1679, 1704, 1744, 1845, 1954, 2087, 2219, 2407, 2701, ]
+ gr: [2344, 2089, 1940, 1831, 1739, 1672, 1602, 1564, 1546, 1553, 1585, 1636, 1713, 1798, 1899, 2031, 2234, 2182, 1973, 1842, 1732, 1637, 1548, 1485, 1448, 1422, 1438, 1466, 1527, 1594, 1695, 1784, 1902, 2122, 2082, 1884, 1773, 1653, 1549, 1465, 1398, 1351, 1329, 1338, 1376, 1435, 1516, 1611, 1725, 1828, 2008, 1997, 1821, 1706, 1585, 1480, 1382, 1319, 1261, 1244, 1253, 1291, 1352, 1439, 1540, 1647, 1772, 1932, 1947, 1773, 1655, 1522, 1409, 1310, 1239, 1184, 1161, 1174, 1213, 1284, 1368, 1480, 1601, 1717, 1882, 1904, 1739, 1605, 1470, 1360, 1257, 1173, 1124, 1094, 1111, 1149, 1221, 1320, 1433, 1550, 1678, 1844, 1878, 1711, 1571, 1443, 1317, 1213, 1126, 1077, 1057, 1066, 1105, 1180, 1279, 1400, 1515, 1652, 1819, 1862, 1687, 1556, 1420, 1299, 1183, 1102, 1048, 1029, 1041, 1081, 1155, 1258, 1374, 1495, 1634, 1800, 1856, 1692, 1556, 1415, 1289, 1176, 1095, 1044, 1024, 1033, 1073, 1145, 1247, 1370, 1492, 1626, 1800, 1869, 1697, 1555, 1419, 1303, 1190, 1104, 1054, 1040, 1045, 1085, 1154, 1260, 1373, 1511, 1632, 1804, 1887, 1717, 1571, 1440, 1323, 1216, 1128, 1077, 1066, 1069, 1109, 1182, 1284, 1398, 1520, 1656, 1831, 1910, 1751, 1607, 1480, 1360, 1261, 1173, 1123, 1100, 1114, 1154, 1226, 1326, 1444, 1555, 1689, 1856, 1962, 1793, 1656, 1522, 1416, 1315, 1237, 1180, 1166, 1176, 1214, 1288, 1375, 1486, 1603, 1722, 1910, 2020, 1845, 1710, 1586, 1477, 1387, 1307, 1266, 1241, 1257, 1292, 1347, 1446, 1548, 1657, 1785, 1964, 2118, 1888, 1794, 1658, 1552, 1462, 1394, 1349, 1332, 1342, 1378, 1436, 1525, 1617, 1736, 1848, 2048, 2195, 1989, 1855, 1742, 1633, 1555, 1487, 1437, 1427, 1429, 1471, 1521, 1603, 1699, 1804, 1921, 2149, 2334, 2103, 1971, 1863, 1757, 1666, 1598, 1565, 1537, 1554, 1579, 1640, 1716, 1810, 1923, 2044, 2308, ]
+ gb: [2383, 2122, 1974, 1866, 1767, 1684, 1620, 1581, 1559, 1575, 1592, 1654, 1726, 1816, 1917, 2071, 2294, 2242, 2002, 1872, 1752, 1650, 1564, 1499, 1455, 1438, 1442, 1485, 1537, 1614, 1715, 1814, 1935, 2155, 2114, 1929, 1797, 1674, 1568, 1477, 1406, 1358, 1340, 1348, 1386, 1447, 1534, 1631, 1754, 1861, 2057, 2044, 1859, 1737, 1606, 1493, 1396, 1322, 1270, 1247, 1259, 1305, 1370, 1455, 1566, 1679, 1808, 1979, 1981, 1812, 1674, 1549, 1424, 1325, 1246, 1191, 1168, 1179, 1222, 1294, 1383, 1498, 1623, 1748, 1932, 1939, 1777, 1626, 1500, 1376, 1265, 1179, 1128, 1104, 1119, 1160, 1235, 1331, 1447, 1577, 1708, 1885, 1922, 1735, 1602, 1464, 1333, 1226, 1134, 1083, 1061, 1071, 1113, 1191, 1296, 1412, 1543, 1677, 1849, 1885, 1723, 1574, 1437, 1310, 1191, 1105, 1055, 1035, 1048, 1088, 1164, 1272, 1388, 1516, 1660, 1847, 1891, 1714, 1568, 1431, 1300, 1185, 1099, 1047, 1024, 1038, 1075, 1155, 1259, 1386, 1512, 1649, 1832, 1901, 1722, 1575, 1434, 1309, 1196, 1109, 1054, 1041, 1047, 1087, 1162, 1267, 1385, 1526, 1650, 1833, 1912, 1740, 1588, 1456, 1329, 1220, 1133, 1080, 1065, 1072, 1113, 1189, 1289, 1410, 1538, 1672, 1862, 1949, 1767, 1632, 1487, 1367, 1261, 1175, 1123, 1100, 1114, 1158, 1224, 1331, 1450, 1571, 1705, 1880, 1990, 1811, 1670, 1531, 1420, 1315, 1227, 1180, 1158, 1172, 1212, 1285, 1375, 1490, 1611, 1744, 1925, 2033, 1864, 1715, 1588, 1477, 1377, 1307, 1253, 1232, 1248, 1285, 1344, 1439, 1545, 1661, 1797, 1971, 2126, 1898, 1798, 1658, 1548, 1449, 1381, 1338, 1315, 1329, 1366, 1428, 1512, 1617, 1730, 1853, 2058, 2203, 1998, 1856, 1734, 1624, 1539, 1467, 1424, 1409, 1409, 1448, 1505, 1584, 1689, 1796, 1923, 2148, 2342, 2110, 1959, 1848, 1740, 1635, 1572, 1533, 1519, 1527, 1561, 1610, 1693, 1786, 1900, 2039, 2306, ]
+ b: [2199, 1976, 1828, 1725, 1640, 1549, 1510, 1473, 1457, 1462, 1485, 1529, 1603, 1690, 1796, 1922, 2111, 2048, 1861, 1735, 1618, 1532, 1462, 1400, 1360, 1346, 1355, 1384, 1433, 1501, 1589, 1680, 1793, 1982, 1975, 1801, 1672, 1564, 1465, 1387, 1326, 1294, 1272, 1284, 1310, 1363, 1440, 1518, 1627, 1730, 1888, 1903, 1736, 1617, 1500, 1405, 1325, 1260, 1219, 1198, 1208, 1239, 1296, 1365, 1465, 1557, 1664, 1833, 1837, 1684, 1556, 1449, 1345, 1261, 1200, 1151, 1132, 1137, 1175, 1238, 1307, 1402, 1517, 1627, 1775, 1806, 1650, 1518, 1407, 1306, 1216, 1144, 1099, 1078, 1092, 1120, 1185, 1270, 1360, 1472, 1596, 1740, 1778, 1621, 1499, 1381, 1270, 1180, 1110, 1066, 1046, 1057, 1087, 1150, 1236, 1335, 1447, 1560, 1703, 1764, 1612, 1479, 1367, 1255, 1158, 1089, 1045, 1031, 1038, 1071, 1128, 1218, 1312, 1430, 1544, 1702, 1773, 1604, 1480, 1359, 1252, 1148, 1082, 1041, 1024, 1036, 1061, 1124, 1210, 1314, 1432, 1542, 1693, 1782, 1617, 1485, 1366, 1253, 1162, 1092, 1046, 1038, 1043, 1068, 1130, 1215, 1322, 1431, 1549, 1700, 1786, 1634, 1499, 1378, 1276, 1184, 1108, 1067, 1060, 1062, 1094, 1153, 1235, 1346, 1450, 1556, 1722, 1813, 1667, 1535, 1411, 1306, 1220, 1148, 1103, 1089, 1091, 1132, 1189, 1277, 1372, 1474, 1593, 1740, 1852, 1712, 1569, 1449, 1354, 1263, 1195, 1156, 1137, 1149, 1180, 1239, 1319, 1413, 1516, 1627, 1798, 1910, 1741, 1617, 1509, 1403, 1324, 1267, 1221, 1205, 1213, 1244, 1296, 1377, 1459, 1565, 1679, 1826, 1984, 1788, 1696, 1556, 1473, 1386, 1333, 1296, 1280, 1282, 1316, 1361, 1442, 1519, 1624, 1732, 1905, 2059, 1881, 1746, 1642, 1533, 1467, 1400, 1370, 1354, 1357, 1389, 1438, 1500, 1587, 1688, 1800, 1995, 2190, 1971, 1845, 1743, 1643, 1562, 1515, 1468, 1453, 1454, 1501, 1532, 1608, 1692, 1782, 1904, 2117, ]
+ #4208x3120_F11_TL84_70 - F11_TL84
+ - ct: 4000
+ resolution: 4208x3120
+ r: [1286, 1278, 1265, 1240, 1240, 1217, 1199, 1205, 1185, 1191, 1213, 1243, 1251, 1276, 1282, 1297, 1358, 1273, 1227, 1225, 1219, 1199, 1190, 1164, 1151, 1137, 1151, 1174, 1213, 1238, 1237, 1261, 1274, 1331, 1273, 1220, 1214, 1199, 1174, 1154, 1126, 1115, 1105, 1106, 1132, 1183, 1215, 1238, 1260, 1277, 1310, 1254, 1204, 1204, 1193, 1151, 1097, 1081, 1066, 1057, 1066, 1094, 1133, 1183, 1228, 1240, 1275, 1341, 1239, 1196, 1193, 1167, 1112, 1071, 1046, 1035, 1034, 1045, 1056, 1097, 1153, 1210, 1232, 1257, 1313, 1240, 1187, 1195, 1142, 1080, 1048, 1031, 1023, 1025, 1026, 1034, 1065, 1115, 1186, 1223, 1254, 1322, 1241, 1178, 1166, 1121, 1060, 1031, 1014, 1029, 1039, 1026, 1032, 1057, 1101, 1162, 1210, 1247, 1295, 1224, 1178, 1157, 1104, 1049, 1021, 1015, 1036, 1044, 1036, 1024, 1049, 1097, 1144, 1206, 1235, 1312, 1215, 1170, 1153, 1098, 1046, 1020, 1017, 1043, 1046, 1036, 1028, 1039, 1086, 1144, 1202, 1234, 1280, 1224, 1178, 1148, 1093, 1049, 1010, 1011, 1032, 1038, 1030, 1024, 1042, 1094, 1153, 1213, 1231, 1294, 1237, 1185, 1157, 1104, 1050, 1017, 1005, 1029, 1030, 1022, 1027, 1048, 1098, 1172, 1213, 1243, 1300, 1244, 1173, 1168, 1122, 1073, 1021, 1011, 1004, 1007, 1015, 1029, 1062, 1115, 1176, 1219, 1227, 1304, 1243, 1192, 1182, 1148, 1093, 1048, 1014, 1004, 1007, 1019, 1039, 1068, 1132, 1187, 1214, 1237, 1290, 1233, 1197, 1186, 1170, 1130, 1068, 1043, 1021, 1024, 1035, 1063, 1100, 1148, 1200, 1218, 1239, 1280, 1225, 1193, 1182, 1178, 1152, 1113, 1082, 1057, 1055, 1069, 1098, 1133, 1184, 1199, 1214, 1224, 1291, 1224, 1180, 1184, 1176, 1165, 1145, 1105, 1093, 1081, 1091, 1128, 1167, 1185, 1197, 1202, 1207, 1268, 1216, 1185, 1208, 1194, 1182, 1156, 1131, 1104, 1097, 1110, 1150, 1176, 1214, 1220, 1219, 1234, 1375, ]
+ gr: [1267, 1211, 1186, 1180, 1181, 1169, 1162, 1152, 1144, 1152, 1159, 1184, 1192, 1196, 1221, 1236, 1372, 1236, 1175, 1159, 1149, 1143, 1142, 1134, 1123, 1120, 1130, 1134, 1154, 1170, 1190, 1202, 1212, 1256, 1214, 1170, 1139, 1139, 1125, 1116, 1120, 1100, 1097, 1106, 1111, 1131, 1160, 1173, 1191, 1203, 1266, 1206, 1150, 1137, 1128, 1111, 1095, 1087, 1073, 1069, 1077, 1097, 1116, 1137, 1160, 1182, 1204, 1252, 1187, 1142, 1137, 1122, 1098, 1068, 1065, 1046, 1052, 1054, 1069, 1093, 1121, 1147, 1174, 1200, 1253, 1176, 1136, 1125, 1111, 1080, 1061, 1044, 1042, 1032, 1041, 1055, 1072, 1106, 1139, 1157, 1186, 1246, 1182, 1120, 1109, 1092, 1067, 1042, 1037, 1033, 1028, 1031, 1043, 1058, 1094, 1130, 1156, 1179, 1240, 1162, 1120, 1110, 1088, 1054, 1032, 1030, 1027, 1027, 1025, 1035, 1050, 1091, 1121, 1149, 1186, 1226, 1152, 1122, 1108, 1092, 1054, 1031, 1024, 1026, 1029, 1021, 1037, 1055, 1085, 1113, 1144, 1178, 1217, 1168, 1113, 1102, 1084, 1053, 1032, 1025, 1024, 1027, 1027, 1032, 1048, 1083, 1123, 1142, 1168, 1226, 1163, 1116, 1111, 1086, 1060, 1033, 1023, 1023, 1025, 1028, 1035, 1062, 1090, 1124, 1140, 1164, 1216, 1179, 1124, 1107, 1100, 1072, 1043, 1024, 1024, 1020, 1029, 1044, 1067, 1106, 1128, 1143, 1163, 1219, 1179, 1127, 1117, 1105, 1086, 1053, 1034, 1029, 1029, 1034, 1054, 1076, 1102, 1125, 1157, 1179, 1231, 1165, 1137, 1120, 1112, 1100, 1069, 1051, 1038, 1038, 1052, 1068, 1097, 1109, 1132, 1146, 1166, 1233, 1187, 1128, 1122, 1111, 1107, 1083, 1073, 1057, 1060, 1076, 1083, 1105, 1114, 1134, 1139, 1170, 1243, 1174, 1126, 1115, 1111, 1097, 1093, 1072, 1073, 1067, 1077, 1095, 1104, 1120, 1139, 1135, 1169, 1256, 1232, 1141, 1148, 1125, 1122, 1123, 1104, 1096, 1093, 1094, 1117, 1137, 1146, 1153, 1158, 1160, 1389, ]
+ gb: [1264, 1211, 1190, 1175, 1162, 1153, 1144, 1142, 1132, 1132, 1149, 1168, 1193, 1211, 1221, 1230, 1377, 1240, 1176, 1162, 1152, 1140, 1139, 1131, 1120, 1120, 1122, 1142, 1155, 1163, 1191, 1203, 1210, 1274, 1240, 1171, 1153, 1142, 1131, 1118, 1104, 1091, 1099, 1099, 1111, 1133, 1156, 1172, 1192, 1213, 1273, 1222, 1157, 1140, 1134, 1117, 1092, 1075, 1069, 1067, 1080, 1091, 1115, 1136, 1167, 1180, 1211, 1272, 1226, 1153, 1134, 1124, 1102, 1079, 1063, 1048, 1050, 1055, 1072, 1097, 1123, 1158, 1180, 1201, 1273, 1199, 1142, 1131, 1117, 1088, 1059, 1042, 1035, 1034, 1037, 1057, 1078, 1116, 1145, 1161, 1193, 1256, 1211, 1141, 1116, 1106, 1074, 1049, 1035, 1031, 1033, 1033, 1045, 1073, 1104, 1136, 1153, 1188, 1250, 1196, 1128, 1114, 1100, 1060, 1039, 1030, 1034, 1032, 1030, 1030, 1057, 1094, 1125, 1155, 1169, 1257, 1204, 1126, 1114, 1100, 1063, 1037, 1022, 1024, 1032, 1034, 1036, 1060, 1094, 1125, 1148, 1172, 1242, 1188, 1123, 1116, 1093, 1060, 1035, 1025, 1024, 1027, 1027, 1034, 1057, 1090, 1134, 1146, 1172, 1239, 1192, 1122, 1119, 1095, 1069, 1040, 1021, 1026, 1016, 1030, 1038, 1065, 1094, 1136, 1148, 1173, 1244, 1202, 1132, 1117, 1104, 1068, 1043, 1034, 1020, 1019, 1025, 1042, 1072, 1102, 1136, 1152, 1167, 1237, 1191, 1136, 1120, 1108, 1087, 1053, 1034, 1025, 1020, 1032, 1050, 1073, 1110, 1130, 1148, 1182, 1238, 1201, 1133, 1117, 1120, 1100, 1071, 1049, 1038, 1032, 1048, 1064, 1090, 1117, 1134, 1152, 1170, 1237, 1188, 1128, 1128, 1115, 1106, 1090, 1067, 1058, 1058, 1066, 1082, 1107, 1115, 1135, 1148, 1171, 1250, 1187, 1138, 1126, 1119, 1108, 1095, 1078, 1075, 1066, 1079, 1090, 1099, 1121, 1143, 1149, 1165, 1237, 1229, 1158, 1157, 1139, 1119, 1118, 1101, 1078, 1084, 1091, 1103, 1125, 1130, 1149, 1173, 1184, 1398, ]
+ b: [1291, 1208, 1168, 1145, 1132, 1140, 1122, 1134, 1138, 1129, 1131, 1140, 1161, 1197, 1196, 1179, 1329, 1235, 1176, 1150, 1125, 1118, 1113, 1115, 1113, 1108, 1113, 1115, 1131, 1136, 1149, 1181, 1176, 1255, 1237, 1147, 1129, 1116, 1119, 1106, 1104, 1091, 1086, 1099, 1104, 1119, 1137, 1134, 1164, 1179, 1231, 1204, 1137, 1111, 1113, 1103, 1096, 1079, 1070, 1070, 1074, 1090, 1104, 1120, 1126, 1149, 1183, 1234, 1208, 1123, 1112, 1118, 1097, 1075, 1066, 1055, 1051, 1059, 1066, 1090, 1114, 1127, 1135, 1157, 1226, 1197, 1110, 1109, 1095, 1083, 1055, 1047, 1044, 1040, 1044, 1051, 1063, 1095, 1112, 1132, 1148, 1232, 1198, 1107, 1098, 1081, 1063, 1051, 1043, 1036, 1033, 1033, 1043, 1061, 1082, 1109, 1116, 1144, 1209, 1161, 1095, 1096, 1091, 1054, 1042, 1039, 1035, 1035, 1022, 1042, 1053, 1080, 1107, 1122, 1132, 1216, 1169, 1097, 1094, 1081, 1048, 1041, 1024, 1034, 1034, 1031, 1034, 1058, 1074, 1105, 1124, 1124, 1218, 1188, 1095, 1092, 1079, 1054, 1042, 1032, 1035, 1022, 1025, 1035, 1053, 1080, 1107, 1118, 1132, 1228, 1181, 1093, 1094, 1077, 1059, 1043, 1030, 1030, 1023, 1033, 1036, 1058, 1090, 1109, 1111, 1135, 1209, 1191, 1105, 1096, 1087, 1060, 1044, 1034, 1034, 1020, 1034, 1037, 1063, 1087, 1112, 1123, 1138, 1226, 1203, 1118, 1090, 1097, 1081, 1052, 1041, 1027, 1030, 1034, 1048, 1067, 1093, 1110, 1121, 1142, 1220, 1210, 1127, 1102, 1091, 1087, 1061, 1052, 1024, 1044, 1041, 1056, 1076, 1091, 1113, 1125, 1152, 1216, 1194, 1107, 1106, 1077, 1085, 1074, 1060, 1048, 1041, 1048, 1060, 1082, 1085, 1085, 1125, 1132, 1218, 1190, 1112, 1074, 1071, 1066, 1067, 1050, 1045, 1045, 1045, 1061, 1075, 1070, 1088, 1106, 1128, 1222, 1234, 1145, 1131, 1120, 1099, 1095, 1079, 1078, 1073, 1078, 1083, 1086, 1108, 1125, 1141, 1156, 1386, ]
+ #4208x3120_F2_CWF_70 - F2_CWF
+ - ct: 4230
+ resolution: 4208x3120
+ r: [1140, 1119, 1106, 1105, 1086, 1079, 1072, 1070, 1070, 1079, 1084, 1102, 1114, 1131, 1157, 1152, 1232, 1131, 1103, 1088, 1084, 1071, 1074, 1077, 1066, 1064, 1063, 1080, 1094, 1101, 1112, 1113, 1134, 1194, 1143, 1073, 1077, 1078, 1069, 1067, 1058, 1060, 1046, 1048, 1067, 1085, 1095, 1101, 1127, 1144, 1169, 1132, 1072, 1074, 1078, 1055, 1045, 1037, 1033, 1039, 1036, 1045, 1068, 1085, 1098, 1122, 1115, 1183, 1106, 1064, 1069, 1068, 1049, 1026, 1030, 1019, 1025, 1026, 1038, 1051, 1070, 1100, 1102, 1120, 1174, 1103, 1043, 1052, 1055, 1024, 1023, 1017, 1019, 1025, 1024, 1032, 1037, 1063, 1085, 1094, 1110, 1195, 1095, 1047, 1062, 1041, 1025, 1017, 1011, 1031, 1027, 1023, 1023, 1030, 1050, 1071, 1084, 1110, 1190, 1073, 1034, 1056, 1042, 1015, 1010, 1016, 1032, 1027, 1024, 1024, 1036, 1039, 1074, 1087, 1109, 1168, 1079, 1042, 1055, 1032, 1019, 1007, 1013, 1026, 1027, 1026, 1021, 1032, 1044, 1082, 1093, 1098, 1158, 1091, 1046, 1053, 1028, 1020, 1007, 1011, 1026, 1022, 1019, 1021, 1020, 1045, 1071, 1084, 1096, 1159, 1114, 1047, 1047, 1030, 1017, 997, 1008, 1016, 1019, 1021, 1016, 1028, 1053, 1080, 1094, 1103, 1157, 1088, 1049, 1052, 1040, 1024, 1003, 1001, 1004, 1010, 1006, 1019, 1037, 1057, 1085, 1084, 1099, 1161, 1106, 1057, 1063, 1056, 1032, 1010, 993, 998, 999, 1006, 1016, 1031, 1052, 1071, 1089, 1106, 1174, 1112, 1055, 1054, 1062, 1043, 1022, 1002, 1004, 1008, 1007, 1015, 1045, 1064, 1085, 1087, 1097, 1157, 1102, 1059, 1064, 1059, 1054, 1035, 1018, 1002, 1005, 1012, 1035, 1052, 1057, 1068, 1071, 1098, 1156, 1098, 1045, 1044, 1042, 1046, 1041, 1024, 1009, 1004, 1017, 1035, 1062, 1062, 1064, 1064, 1088, 1140, 1088, 1043, 1070, 1066, 1041, 1047, 1026, 1014, 1009, 1022, 1032, 1060, 1073, 1077, 1087, 1107, 1237, ]
+ gr: [1219, 1156, 1145, 1130, 1128, 1112, 1116, 1104, 1112, 1106, 1118, 1128, 1154, 1165, 1161, 1170, 1306, 1183, 1124, 1113, 1099, 1100, 1099, 1091, 1084, 1095, 1090, 1099, 1116, 1126, 1140, 1142, 1158, 1213, 1174, 1112, 1103, 1094, 1084, 1087, 1090, 1075, 1075, 1077, 1088, 1101, 1119, 1133, 1149, 1162, 1193, 1149, 1106, 1091, 1086, 1076, 1071, 1066, 1057, 1064, 1064, 1074, 1082, 1109, 1117, 1140, 1151, 1204, 1155, 1094, 1089, 1088, 1075, 1059, 1052, 1046, 1043, 1048, 1061, 1074, 1101, 1113, 1123, 1154, 1198, 1137, 1093, 1082, 1078, 1059, 1048, 1041, 1033, 1030, 1038, 1048, 1059, 1078, 1109, 1116, 1143, 1198, 1119, 1082, 1074, 1071, 1051, 1040, 1036, 1032, 1031, 1031, 1042, 1047, 1077, 1097, 1112, 1133, 1185, 1126, 1082, 1077, 1058, 1039, 1029, 1025, 1024, 1024, 1022, 1033, 1044, 1068, 1095, 1099, 1131, 1187, 1123, 1078, 1071, 1060, 1043, 1028, 1025, 1027, 1027, 1021, 1033, 1045, 1066, 1087, 1105, 1121, 1173, 1121, 1070, 1067, 1058, 1039, 1024, 1020, 1024, 1024, 1022, 1030, 1043, 1064, 1093, 1099, 1121, 1182, 1112, 1076, 1072, 1065, 1044, 1029, 1021, 1023, 1021, 1026, 1032, 1047, 1066, 1091, 1105, 1131, 1180, 1132, 1076, 1066, 1067, 1052, 1031, 1021, 1021, 1020, 1028, 1039, 1044, 1076, 1098, 1107, 1127, 1179, 1124, 1087, 1076, 1076, 1064, 1036, 1018, 1018, 1020, 1028, 1041, 1056, 1085, 1086, 1106, 1128, 1187, 1126, 1099, 1082, 1072, 1065, 1043, 1031, 1024, 1029, 1034, 1052, 1065, 1074, 1094, 1111, 1127, 1181, 1128, 1086, 1076, 1073, 1072, 1058, 1050, 1046, 1039, 1048, 1059, 1074, 1070, 1096, 1112, 1124, 1174, 1140, 1078, 1077, 1067, 1057, 1055, 1043, 1040, 1042, 1042, 1054, 1069, 1075, 1088, 1099, 1112, 1189, 1182, 1099, 1096, 1093, 1082, 1080, 1072, 1055, 1059, 1061, 1076, 1095, 1090, 1112, 1113, 1140, 1321, ]
+ gb: [1236, 1163, 1136, 1120, 1113, 1111, 1109, 1101, 1104, 1099, 1102, 1140, 1141, 1158, 1170, 1194, 1332, 1195, 1138, 1114, 1109, 1097, 1098, 1092, 1089, 1085, 1089, 1098, 1117, 1125, 1141, 1155, 1156, 1232, 1186, 1125, 1108, 1095, 1099, 1081, 1078, 1075, 1073, 1073, 1083, 1097, 1118, 1128, 1148, 1166, 1218, 1171, 1107, 1099, 1091, 1086, 1069, 1059, 1051, 1049, 1064, 1071, 1088, 1110, 1118, 1137, 1162, 1225, 1171, 1099, 1092, 1085, 1069, 1057, 1051, 1041, 1036, 1050, 1055, 1077, 1092, 1118, 1133, 1151, 1227, 1158, 1099, 1090, 1086, 1061, 1043, 1039, 1028, 1036, 1039, 1048, 1060, 1091, 1110, 1117, 1147, 1216, 1152, 1086, 1082, 1073, 1054, 1040, 1026, 1028, 1029, 1032, 1040, 1051, 1076, 1104, 1115, 1139, 1222, 1141, 1088, 1078, 1073, 1048, 1034, 1026, 1025, 1025, 1022, 1033, 1051, 1077, 1104, 1115, 1129, 1202, 1154, 1081, 1080, 1069, 1050, 1029, 1023, 1022, 1029, 1027, 1031, 1050, 1070, 1098, 1107, 1127, 1188, 1146, 1090, 1078, 1065, 1044, 1029, 1015, 1022, 1024, 1025, 1035, 1053, 1071, 1104, 1102, 1136, 1207, 1152, 1083, 1078, 1073, 1042, 1027, 1024, 1024, 1016, 1024, 1037, 1056, 1076, 1106, 1111, 1130, 1197, 1146, 1086, 1076, 1074, 1046, 1031, 1023, 1018, 1021, 1026, 1043, 1051, 1081, 1102, 1111, 1126, 1191, 1134, 1090, 1084, 1079, 1067, 1038, 1019, 1018, 1021, 1033, 1041, 1055, 1081, 1099, 1107, 1131, 1199, 1147, 1091, 1082, 1083, 1072, 1050, 1031, 1024, 1027, 1032, 1053, 1063, 1082, 1099, 1107, 1130, 1191, 1139, 1087, 1078, 1077, 1073, 1058, 1048, 1037, 1037, 1046, 1062, 1073, 1079, 1099, 1099, 1130, 1177, 1147, 1082, 1087, 1074, 1061, 1062, 1052, 1042, 1036, 1045, 1063, 1068, 1079, 1094, 1103, 1120, 1189, 1176, 1105, 1102, 1092, 1081, 1073, 1064, 1053, 1053, 1066, 1067, 1084, 1087, 1103, 1134, 1146, 1336, ]
+ b: [1203, 1195, 1154, 1123, 1104, 1106, 1116, 1099, 1099, 1099, 1102, 1106, 1123, 1155, 1149, 1168, 1283, 1196, 1141, 1119, 1102, 1098, 1088, 1088, 1095, 1086, 1095, 1097, 1101, 1117, 1121, 1156, 1135, 1209, 1211, 1127, 1102, 1082, 1089, 1088, 1072, 1075, 1083, 1083, 1085, 1106, 1107, 1120, 1142, 1149, 1224, 1163, 1121, 1087, 1078, 1085, 1077, 1062, 1065, 1056, 1057, 1082, 1093, 1094, 1096, 1111, 1147, 1193, 1179, 1105, 1083, 1088, 1070, 1074, 1060, 1048, 1055, 1044, 1068, 1082, 1091, 1097, 1102, 1141, 1209, 1178, 1091, 1076, 1077, 1063, 1060, 1043, 1043, 1035, 1046, 1059, 1064, 1084, 1103, 1107, 1125, 1196, 1156, 1088, 1068, 1070, 1057, 1043, 1046, 1041, 1038, 1038, 1046, 1059, 1073, 1083, 1086, 1111, 1178, 1146, 1067, 1083, 1068, 1044, 1042, 1033, 1044, 1033, 1026, 1037, 1045, 1067, 1089, 1092, 1108, 1203, 1148, 1082, 1072, 1066, 1050, 1044, 1035, 1035, 1031, 1028, 1035, 1055, 1069, 1082, 1094, 1101, 1188, 1163, 1067, 1074, 1056, 1040, 1034, 1037, 1026, 1022, 1033, 1037, 1049, 1067, 1084, 1092, 1103, 1185, 1156, 1074, 1073, 1066, 1042, 1036, 1028, 1031, 1030, 1034, 1042, 1051, 1073, 1091, 1090, 1102, 1196, 1172, 1086, 1071, 1077, 1055, 1041, 1036, 1025, 1024, 1028, 1032, 1053, 1076, 1094, 1089, 1101, 1178, 1179, 1095, 1079, 1075, 1070, 1043, 1026, 1022, 1022, 1029, 1045, 1054, 1078, 1075, 1092, 1120, 1179, 1193, 1091, 1074, 1061, 1064, 1056, 1043, 1034, 1026, 1027, 1039, 1060, 1081, 1070, 1078, 1115, 1205, 1172, 1096, 1069, 1060, 1071, 1055, 1044, 1035, 1027, 1043, 1048, 1063, 1054, 1065, 1083, 1122, 1186, 1158, 1088, 1060, 1043, 1037, 1037, 1031, 1033, 1025, 1029, 1035, 1041, 1041, 1060, 1084, 1114, 1202, 1217, 1122, 1101, 1079, 1058, 1061, 1049, 1056, 1051, 1036, 1062, 1061, 1076, 1094, 1116, 1139, 1331, ]
+
diff --git a/src/ipa/rkisp1/data/meson.build b/src/ipa/rkisp1/data/meson.build
index c3b4e388..7150e155 100644
--- a/src/ipa/rkisp1/data/meson.build
+++ b/src/ipa/rkisp1/data/meson.build
@@ -2,9 +2,11 @@
conf_files = files([
'imx219.yaml',
+ 'ov4689.yaml',
'ov5640.yaml',
'uncalibrated.yaml',
])
install_data(conf_files,
- install_dir : ipa_data_dir / 'rkisp1')
+ install_dir : ipa_data_dir / 'rkisp1',
+ install_tag : 'runtime')
diff --git a/src/ipa/rkisp1/data/ov2685.yaml b/src/ipa/rkisp1/data/ov2685.yaml
new file mode 100644
index 00000000..fdfc98d3
--- /dev/null
+++ b/src/ipa/rkisp1/data/ov2685.yaml
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: CC0-1.0
+%YAML 1.1
+---
+version: 1
+algorithms:
+ - Agc:
+ - Awb:
+ - LensShadingCorrection:
+ x-size: [ 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625 ]
+ y-size: [ 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625 ]
+ sets:
+ #800x600_A_70 - A
+ - ct: 2856
+ resolution: 800x600
+ r: [2451, 2258, 2111, 2039, 1982, 1925, 1860, 1818, 1802, 1815, 1859, 1936, 1997, 2056, 2129, 2298, 2486, 2351, 2157, 2066, 1991, 1912, 1809, 1720, 1677, 1653, 1671, 1739, 1843, 1932, 2009, 2071, 2182, 2392, 2253, 2105, 2018, 1929, 1802, 1670, 1566, 1503, 1475, 1508, 1590, 1705, 1848, 1947, 2026, 2118, 2281, 2174, 2065, 1975, 1854, 1687, 1529, 1412, 1345, 1327, 1358, 1445, 1572, 1733, 1870, 1992, 2075, 2202, 2125, 2033, 1929, 1765, 1574, 1407, 1286, 1220, 1204, 1237, 1318, 1447, 1632, 1801, 1951, 2048, 2142, 2092, 2010, 1877, 1688, 1471, 1304, 1187, 1127, 1118, 1149, 1221, 1348, 1533, 1738, 1918, 2021, 2105, 2088, 1982, 1836, 1628, 1398, 1239, 1128, 1073, 1060, 1086, 1163, 1280, 1466, 1688, 1886, 2001, 2092, 2067, 1965, 1809, 1584, 1358, 1200, 1094, 1044, 1030, 1056, 1123, 1240, 1424, 1649, 1860, 1989, 2082, 2057, 1960, 1795, 1569, 1345, 1187, 1083, 1034, 1024, 1046, 1111, 1229, 1408, 1637, 1850, 1989, 2085, 2053, 1967, 1802, 1578, 1358, 1199, 1095, 1046, 1031, 1058, 1122, 1245, 1423, 1651, 1867, 1989, 2084, 2059, 1970, 1823, 1615, 1399, 1235, 1129, 1074, 1061, 1090, 1161, 1281, 1461, 1689, 1878, 2006, 2096, 2086, 1989, 1866, 1670, 1471, 1302, 1188, 1134, 1117, 1150, 1223, 1352, 1537, 1745, 1909, 2028, 2114, 2101, 2006, 1916, 1749, 1567, 1399, 1278, 1218, 1206, 1237, 1317, 1456, 1633, 1813, 1954, 2053, 2142, 2171, 2023, 1954, 1843, 1680, 1526, 1403, 1339, 1323, 1357, 1440, 1575, 1733, 1885, 1996, 2069, 2212, 2231, 2074, 1990, 1916, 1792, 1656, 1554, 1489, 1473, 1513, 1588, 1702, 1840, 1946, 2011, 2124, 2283, 2343, 2146, 2036, 1973, 1890, 1789, 1700, 1653, 1645, 1678, 1733, 1828, 1922, 1978, 2065, 2181, 2405, 2420, 2246, 2092, 2015, 1954, 1885, 1816, 1776, 1777, 1791, 1847, 1904, 1941, 2016, 2105, 2284, 2463, ]
+ gr: [1790, 1645, 1522, 1469, 1433, 1419, 1390, 1381, 1374, 1381, 1401, 1428, 1460, 1494, 1552, 1693, 1839, 1687, 1555, 1471, 1433, 1408, 1362, 1335, 1319, 1308, 1318, 1344, 1393, 1430, 1456, 1497, 1591, 1752, 1612, 1503, 1447, 1417, 1365, 1315, 1276, 1248, 1237, 1252, 1290, 1339, 1404, 1435, 1469, 1539, 1661, 1547, 1470, 1424, 1389, 1321, 1260, 1205, 1173, 1165, 1181, 1221, 1286, 1358, 1409, 1452, 1503, 1603, 1504, 1451, 1411, 1358, 1276, 1198, 1148, 1114, 1110, 1124, 1164, 1228, 1320, 1388, 1435, 1479, 1552, 1475, 1437, 1392, 1325, 1231, 1153, 1094, 1069, 1068, 1084, 1119, 1182, 1278, 1365, 1429, 1469, 1529, 1464, 1430, 1375, 1301, 1196, 1118, 1067, 1043, 1039, 1051, 1089, 1150, 1245, 1342, 1417, 1453, 1512, 1461, 1418, 1369, 1281, 1177, 1099, 1051, 1028, 1029, 1037, 1069, 1129, 1224, 1328, 1404, 1449, 1503, 1455, 1422, 1366, 1276, 1170, 1094, 1046, 1026, 1024, 1033, 1063, 1125, 1216, 1322, 1400, 1448, 1508, 1459, 1426, 1368, 1280, 1179, 1102, 1051, 1030, 1029, 1039, 1071, 1132, 1222, 1327, 1406, 1448, 1502, 1473, 1433, 1380, 1302, 1201, 1125, 1069, 1046, 1043, 1055, 1091, 1153, 1245, 1343, 1412, 1461, 1523, 1488, 1445, 1397, 1328, 1242, 1157, 1104, 1079, 1073, 1088, 1127, 1193, 1284, 1373, 1424, 1473, 1543, 1521, 1461, 1424, 1361, 1289, 1210, 1152, 1124, 1118, 1134, 1174, 1242, 1330, 1396, 1439, 1494, 1572, 1573, 1475, 1434, 1397, 1336, 1270, 1213, 1182, 1176, 1194, 1239, 1301, 1366, 1420, 1464, 1510, 1624, 1628, 1510, 1449, 1424, 1378, 1326, 1281, 1252, 1243, 1264, 1304, 1352, 1406, 1443, 1456, 1554, 1692, 1727, 1578, 1482, 1448, 1415, 1374, 1337, 1318, 1317, 1338, 1356, 1398, 1429, 1443, 1501, 1603, 1783, 1776, 1643, 1510, 1448, 1415, 1387, 1353, 1344, 1343, 1348, 1368, 1396, 1407, 1442, 1515, 1674, 1832, ]
+ gb: [1805, 1650, 1529, 1468, 1430, 1412, 1378, 1371, 1363, 1371, 1393, 1430, 1465, 1501, 1567, 1713, 1864, 1700, 1564, 1476, 1434, 1404, 1359, 1323, 1306, 1294, 1306, 1338, 1388, 1432, 1462, 1509, 1605, 1780, 1627, 1520, 1457, 1423, 1370, 1311, 1267, 1238, 1226, 1245, 1286, 1344, 1414, 1448, 1489, 1563, 1697, 1568, 1487, 1436, 1398, 1325, 1257, 1200, 1163, 1156, 1175, 1221, 1291, 1372, 1427, 1476, 1528, 1636, 1527, 1474, 1431, 1371, 1285, 1201, 1144, 1109, 1104, 1121, 1165, 1239, 1335, 1411, 1461, 1509, 1588, 1498, 1463, 1413, 1343, 1242, 1159, 1094, 1066, 1064, 1083, 1124, 1195, 1299, 1391, 1455, 1499, 1561, 1492, 1454, 1401, 1319, 1209, 1124, 1068, 1042, 1039, 1053, 1096, 1164, 1268, 1370, 1446, 1486, 1547, 1486, 1446, 1392, 1302, 1190, 1108, 1053, 1028, 1029, 1040, 1078, 1146, 1245, 1355, 1437, 1600, 1546, 1600, 1449, 1389, 1294, 1184, 1101, 1047, 1024, 1024, 1035, 1073, 1136, 1240, 1348, 1431, 1483, 1537, 1485, 1450, 1390, 1298, 1188, 1109, 1051, 1030, 1026, 1038, 1077, 1143, 1243, 1354, 1436, 1482, 1547, 1494, 1454, 1400, 1317, 1211, 1125, 1067, 1041, 1038, 1053, 1094, 1165, 1264, 1368, 1440, 1489, 1557, 1513, 1464, 1414, 1340, 1245, 1156, 1097, 1071, 1063, 1081, 1126, 1197, 1298, 1394, 1446, 1502, 1573, 1541, 1477, 1438, 1370, 1292, 1204, 1142, 1111, 1106, 1121, 1169, 1245, 1338, 1411, 1462, 1519, 1599, 1590, 1485, 1447, 1403, 1334, 1263, 1199, 1164, 1158, 1179, 1230, 1299, 1373, 1433, 1477, 1528, 1649, 1643, 1520, 1454, 1426, 1375, 1315, 1266, 1235, 1224, 1247, 1291, 1345, 1408, 1449, 1468, 1572, 1711, 1738, 1579, 1482, 1443, 1406, 1359, 1318, 1294, 1294, 1312, 1338, 1385, 1427, 1441, 1507, 1614, 1799, 1786, 1653, 1516, 1452, 1414, 1383, 1348, 1331, 1328, 1336, 1362, 1391, 1408, 1448, 1529, 1684, 1858, ]
+ b: [1807, 1633, 1496, 1427, 1395, 1372, 1357, 1340, 1339, 1335, 1356, 1382, 1410, 1454, 1541, 1690, 1860, 1657, 1503, 1411, 1364, 1342, 1312, 1286, 1274, 1262, 1270, 1287, 1326, 1355, 1387, 1447, 1550, 1726, 1556, 1438, 1374, 1340, 1305, 1267, 1236, 1213, 1199, 1211, 1246, 1280, 1324, 1355, 1397, 1475, 1620, 1473, 1407, 1350, 1317, 1270, 1223, 1173, 1144, 1135, 1151, 1185, 1237, 1292, 1326, 1368, 1422, 1544, 1430, 1375, 1331, 1293, 1238, 1166, 1120, 1096, 1091, 1104, 1133, 1188, 1261, 1310, 1351, 1388, 1487, 1383, 1362, 1316, 1269, 1194, 1128, 1076, 1054, 1057, 1070, 1101, 1146, 1229, 1294, 1329, 1368, 1459, 1368, 1347, 1301, 1250, 1162, 1099, 1057, 1039, 1035, 1041, 1076, 1119, 1199, 1271, 1321, 1349, 1440, 1360, 1338, 1299, 1234, 1145, 1086, 1042, 1029, 1026, 1034, 1059, 1104, 1176, 1260, 1307, 1344, 1439, 1347, 1342, 1293, 1226, 1139, 1077, 1040, 1024, 1025, 1030, 1051, 1099, 1170, 1249, 1301, 1335, 1432, 1346, 1342, 1295, 1227, 1145, 1083, 1040, 1025, 1024, 1031, 1059, 1096, 1170, 1247, 1297, 1338, 1436, 1362, 1344, 1299, 1245, 1161, 1095, 1055, 1034, 1031, 1041, 1069, 1115, 1185, 1252, 1299, 1347, 1453, 1378, 1353, 1311, 1261, 1191, 1117, 1077, 1058, 1045, 1063, 1092, 1141, 1210, 1274, 1302, 1358, 1461, 1405, 1364, 1329, 1281, 1229, 1159, 1106, 1084, 1080, 1093, 1124, 1180, 1244, 1285, 1317, 1380, 1496, 1467, 1379, 1343, 1304, 1260, 1208, 1154, 1127, 1117, 1138, 1172, 1225, 1266, 1297, 1340, 1397, 1556, 1532, 1428, 1354, 1325, 1290, 1248, 1211, 1181, 1178, 1197, 1227, 1261, 1293, 1321, 1342, 1450, 1624, 1634, 1502, 1394, 1347, 1316, 1283, 1251, 1239, 1241, 1254, 1266, 1297, 1312, 1328, 1396, 1509, 1739, 1685, 1572, 1426, 1351, 1313, 1285, 1257, 1254, 1249, 1259, 1266, 1287, 1292, 1336, 1429, 1593, 1816, ]
+ #800x600_D65_70 - D65
+ - ct: 6504
+ resolution: 800x600
+ r: [2310, 2164, 1991, 1936, 1850, 1817, 1755, 1703, 1707, 1707, 1757, 1836, 1862, 1962, 2029, 2221, 2360, 2246, 2047, 1960, 1865, 1809, 1707, 1633, 1600, 1571, 1595, 1646, 1733, 1829, 1886, 1973, 2107, 2297, 2150, 1988, 1897, 1818, 1703, 1592, 1504, 1453, 1424, 1452, 1527, 1625, 1753, 1828, 1929, 2014, 2213, 2056, 1960, 1846, 1757, 1608, 1475, 1376, 1315, 1297, 1330, 1399, 1512, 1645, 1782, 1879, 1981, 2117, 2007, 1925, 1817, 1678, 1513, 1371, 1268, 1205, 1188, 1221, 800, 1406, 1563, 1712, 1840, 1954, 2039, 1988, 1883, 1780, 1612, 1425, 1282, 1180, 1125, 1111, 1140, 1208, 1324, 1484, 1660, 1821, 1914, 2015, 1973, 1864, 1740, 1553, 1366, 1220, 1124, 1069, 1057, 1083, 1154, 1264, 1423, 1615, 1794, 1891, 2000, 1955, 1842, 1717, 1524, 1332, 1187, 1094, 1042, 1028, 1053, 1117, 1229, 1387, 1582, 1767, 1877, 1991, 1942, 1849, 1704, 1509, 1320, 1177, 1081, 1031, 1024, 1042, 1108, 1216, 1376, 1569, 1767, 1877, 1998, 1946, 1853, 1710, 1515, 1335, 1186, 1092, 1041, 1030, 1055, 1118, 1233, 1390, 1584, 1773, 1885, 1985, 1958, 1852, 1737, 1550, 1370, 1224, 1125, 1073, 1058, 1089, 1155, 1265, 1419, 1614, 1788, 1894, 2007, 1973, 1875, 1768, 1604, 1426, 1282, 1181, 1128, 1112, 1145, 1214, 1330, 1491, 1667, 1810, 1926, 2015, 1995, 1902, 1815, 1667, 1513, 1371, 1262, 1207, 1194, 1224, 1299, 1418, 1569, 1723, 1848, 1961, 2038, 2051, 1925, 1837, 1758, 1606, 1473, 1373, 1313, 1302, 1335, 1405, 1521, 1650, 1793, 1893, 1977, 2116, 2136, 1971, 1882, 1815, 1703, 1587, 1492, 1445, 1432, 1461, 1529, 1624, 1754, 1841, 1907, 2032, 2215, 2244, 2038, 1200, 1860, 1800, 1696, 1625, 1583, 1577, 1610, 1653, 1734, 1822, 1865, 1980, 2109, 2298, 2286, 2159, 1971, 1909, 1828, 1794, 1703, 1686, 1686, 1689, 1740, 1810, 1830, 1925, 1999, 2201, 2357, ]
+ gr: [1785, 1800, 1516, 1458, 1422, 1403, 1374, 1363, 1359, 1363, 1385, 1417, 1447, 1486, 1547, 1693, 1834, 1675, 1547, 1462, 1418, 1393, 1346, 1319, 1304, 1289, 1302, 1330, 1382, 1417, 1451, 1492, 1592, 1743, 1607, 1498, 1437, 1404, 1353, 1301, 1264, 1238, 1226, 1240, 1281, 1325, 1398, 1426, 1468, 1541, 1668, 1547, 1466, 1413, 1382, 1311, 1251, 1202, 1168, 1161, 1176, 1218, 1275, 1351, 1408, 1449, 1498, 1606, 1499, 1447, 1404, 1349, 1269, 1199, 1147, 1113, 1106, 1123, 1163, 1225, 1313, 1384, 1435, 1485, 1551, 1467, 1437, 1388, 1318, 1228, 1154, 1099, 1070, 1066, 1081, 1120, 1185, 1278, 1362, 1430, 1468, 1530, 1460, 1422, 1370, 1293, 1199, 1121, 1068, 1044, 1035, 1052, 1090, 1155, 1244, 1344, 1420, 1457, 1507, 1460, 1416, 1363, 1278, 1179, 1105, 1054, 1028, 1028, 1036, 1073, 1134, 1230, 1323, 1413, 1452, 1509, 1454, 1421, 1361, 1272, 1174, 1097, 1046, 1025, 1024, 1033, 1068, 1130, 1222, 1320, 1408, 1450, 1503, 1456, 1423, 1366, 1275, 1184, 1105, 1053, 1030, 1027, 1040, 1073, 1136, 1228, 1324, 1411, 1457, 1508, 1472, 1429, 1376, 1294, 1205, 1126, 1072, 1046, 1044, 1058, 1095, 1159, 1246, 1345, 1419, 1464, 1530, 1481, 1443, 1396, 1322, 1239, 1161, 1104, 1078, 1070, 1088, 1128, 1196, 1283, 1371, 1428, 1600, 1551, 1521, 1457, 1421, 1355, 1282, 1209, 1152, 1125, 1116, 1134, 1176, 1243, 1324, 1398, 1446, 1497, 1581, 1571, 1471, 1430, 1392, 1328, 1262, 1210, 1179, 1172, 1191, 1236, 1295, 1363, 1424, 1465, 1511, 1636, 1636, 1509, 1448, 1415, 1368, 1316, 1271, 1243, 1234, 1258, 800, 1340, 1407, 1439, 1459, 1561, 1699, 1720, 1577, 1479, 1444, 1408, 1362, 1325, 1304, 1305, 1325, 1348, 1394, 1426, 1439, 1503, 1609, 1788, 1770, 1642, 1502, 1444, 1400, 1384, 1338, 1334, 1329, 1339, 1357, 1389, 1396, 1443, 1514, 1670, 1822, ]
+ gb: [1791, 1649, 1516, 1459, 1422, 1404, 1373, 1360, 1353, 1358, 1386, 1424, 1451, 1492, 1563, 1710, 1854, 1687, 1553, 1463, 1420, 1393, 1347, 1313, 800, 1284, 1295, 1324, 1376, 1417, 1455, 1493, 1609, 1768, 1617, 1511, 1444, 1409, 1359, 1299, 1260, 1234, 1219, 1237, 1276, 1328, 1403, 1431, 1479, 1557, 1696, 1555, 1477, 1422, 1388, 1311, 1250, 1200, 1165, 1158, 1174, 1217, 1281, 1358, 1416, 1463, 1520, 1629, 1520, 1458, 1415, 1355, 1272, 1203, 1144, 1111, 1105, 1122, 1165, 1231, 1322, 1394, 1447, 1497, 1577, 1481, 1452, 1399, 1330, 1234, 1160, 1101, 1070, 1065, 1082, 1124, 1192, 1288, 1373, 1443, 1485, 1556, 1476, 1437, 1384, 1304, 1207, 1124, 1070, 1045, 1039, 1055, 1092, 1163, 1256, 1357, 1429, 1475, 1539, 1470, 1430, 1373, 1288, 1186, 1108, 1056, 1029, 1027, 1040, 1078, 1142, 1240, 1336, 1424, 1469, 1529, 1465, 1433, 1370, 1281, 1179, 1102, 1049, 1025, 1024, 1035, 1070, 1134, 1230, 1332, 1420, 1464, 1536, 1469, 1434, 1372, 1283, 1186, 1108, 1055, 1029, 1027, 1037, 1076, 1145, 1236, 1337, 1421, 1468, 1535, 1478, 1438, 1382, 1303, 1210, 1128, 1070, 1044, 1040, 1056, 1096, 1164, 1255, 1355, 1427, 1478, 1551, 1489, 1454, 1401, 1329, 1239, 1160, 1102, 1075, 1067, 1084, 1128, 1196, 1288, 1380, 1435, 1492, 1573, 1528, 1464, 1426, 1358, 1283, 1206, 1146, 1116, 1110, 1129, 1172, 1242, 1327, 1402, 1451, 1508, 1597, 1574, 1476, 1433, 1395, 1326, 1254, 1202, 1170, 1165, 1182, 1230, 1292, 1361, 1425, 1471, 1526, 1657, 1638, 1512, 1449, 1418, 1366, 1308, 1259, 1230, 1223, 1246, 1285, 1334, 1402, 1439, 1465, 1574, 1712, 1723, 1575, 1474, 1440, 1400, 1353, 1312, 1289, 1287, 1305, 1332, 1381, 1417, 1440, 1504, 1616, 1806, 1780, 1652, 1506, 1448, 1403, 1380, 1340, 1327, 1325, 1335, 1350, 1390, 1402, 1448, 1532, 1693, 1848, ]
+ b: [1834, 1686, 1532, 1462, 1420, 1404, 1369, 1360, 1354, 1357, 1375, 1415, 1442, 1496, 1568, 1741, 1872, 1706, 1543, 1441, 1391, 1366, 1321, 1295, 1281, 1270, 1276, 1305, 1345, 1389, 1418, 1477, 1588, 1752, 1594, 1473, 1400, 1363, 1317, 1269, 1238, 1216, 1206, 1214, 1250, 800, 1353, 1389, 1434, 1503, 1664, 1514, 1437, 1372, 1334, 1278, 1228, 1180, 1151, 1143, 1159, 1196, 1246, 1313, 1359, 1405, 1453, 1587, 1465, 1401, 1351, 1308, 1236, 1177, 1127, 1101, 1093, 1109, 1141, 1200, 1274, 1335, 1384, 1427, 1522, 1423, 1386, 1335, 1275, 1199, 1133, 1087, 1063, 1059, 1069, 1104, 1159, 1240, 1316, 1369, 1402, 1493, 1407, 1375, 1318, 1256, 1172, 1107, 1060, 1041, 1035, 1048, 1077, 1135, 1211, 1291, 1354, 1391, 1478, 1390, 1365, 1313, 1239, 1153, 1089, 1047, 1029, 1028, 1033, 1065, 1116, 1193, 1278, 1342, 1382, 1475, 1384, 1364, 1308, 1231, 1146, 1082, 1040, 1025, 1024, 1030, 1057, 1110, 1183, 1269, 1337, 1379, 1475, 1384, 1372, 1309, 1233, 1152, 1086, 1046, 1024, 1024, 1032, 1061, 1113, 1187, 1268, 1337, 1379, 1479, 1395, 1370, 1317, 1249, 1171, 1102, 1058, 1035, 1029, 1047, 1073, 1130, 1200, 1278, 1341, 1388, 1491, 1420, 1383, 1336, 1265, 1195, 1129, 1078, 1059, 1053, 1065, 1102, 1155, 1227, 1301, 1348, 1405, 1505, 1452, 1396, 1356, 1295, 1234, 1166, 1116, 1092, 1084, 1103, 1139, 1195, 1262, 1321, 1364, 1420, 1547, 1517, 1414, 1375, 1324, 1269, 1214, 1165, 1138, 1132, 1148, 1188, 1239, 1291, 1336, 1387, 1446, 1604, 1587, 1471, 1383, 1354, 1309, 1257, 1216, 1192, 1187, 1209, 1241, 1277, 1330, 1366, 1384, 1498, 1682, 1689, 1543, 1427, 1381, 1344, 1303, 1265, 1250, 1251, 1266, 1284, 1326, 1353, 1369, 1447, 1566, 1790, 1754, 1632, 1469, 1391, 1353, 1317, 1292, 1282, 1278, 1294, 1306, 1321, 1347, 1382, 1477, 1650, 1854, ]
+ #800x600_F2_CWF_70 - F2_CWF
+ - ct: 4230
+ resolution: 800x600
+ r: [2065, 1886, 1745, 1661, 1619, 1574, 1532, 1504, 1498, 1499, 1533, 1586, 1628, 1689, 1770, 1942, 2140, 1978, 1796, 1688, 1627, 1565, 1501, 1446, 1424, 1407, 1419, 1460, 1525, 1583, 1642, 1712, 1829, 2032, 1880, 1732, 1643, 1579, 1499, 1418, 1356, 1319, 1300, 1320, 1372, 1443, 1536, 1598, 1661, 1763, 1923, 1812, 1689, 1608, 1535, 1429, 1335, 1267, 1223, 1210, 1234, 1284, 1362, 1461, 1547, 1634, 1715, 1848, 1755, 1664, 1579, 1600, 1362, 1262, 1188, 1145, 1132, 1156, 1211, 1289, 1403, 1504, 1604, 1688, 1791, 1726, 1635, 1548, 1433, 1298, 1199, 1126, 1084, 1080, 1101, 1147, 1226, 1340, 1468, 1586, 1659, 1752, 1707, 1624, 1522, 1393, 1256, 1155, 1085, 1054, 1043, 1059, 1111, 1187, 1302, 1435, 1566, 1645, 1732, 1695, 1605, 1508, 1367, 1230, 1132, 1066, 1034, 1028, 1042, 1084, 1160, 1275, 1418, 1549, 1634, 1722, 1681, 1604, 1498, 1360, 1222, 1121, 1058, 1027, 1024, 1034, 1075, 1151, 1264, 1407, 1543, 1633, 1723, 1691, 1609, 1498, 1361, 1231, 1130, 1064, 1037, 1027, 1043, 1083, 1162, 1275, 1413, 1545, 1638, 1714, 1692, 1612, 1515, 1385, 1258, 1153, 1087, 1051, 1045, 1064, 1109, 1185, 1295, 1437, 1560, 1645, 1741, 1712, 1627, 1538, 1417, 1298, 1199, 1124, 1087, 1075, 1101, 1146, 1231, 1342, 1472, 1574, 1665, 1754, 1743, 1637, 1572, 1466, 1357, 1253, 1181, 1142, 1131, 1154, 1207, 1295, 1401, 1515, 1601, 1687, 1789, 1807, 1661, 1597, 1525, 1425, 1328, 1257, 1215, 1208, 1230, 1282, 1363, 1459, 1555, 1800, 1714, 1857, 1871, 1711, 1631, 1573, 1491, 1407, 1343, 1307, 1298, 1323, 1368, 1440, 1528, 1601, 1649, 1767, 1932, 1982, 1788, 1675, 1617, 1559, 1489, 1433, 1406, 1405, 1425, 1457, 1516, 1581, 1623, 1713, 1836, 2044, 2041, 1885, 1730, 1646, 1589, 1547, 1498, 1476, 1474, 1488, 1518, 1569, 1594, 1656, 1757, 1921, 2111, ]
+ gr: [1765, 1633, 1502, 1441, 1411, 1389, 1365, 1356, 1350, 1358, 1375, 1408, 1434, 1476, 1534, 1678, 1820, 1671, 1535, 1450, 1410, 1381, 1341, 1311, 1297, 1288, 1295, 1323, 1368, 1407, 1437, 1600, 1580, 1736, 1595, 1488, 1424, 1388, 1342, 1293, 1255, 1230, 1219, 1235, 1270, 1319, 1384, 1413, 1452, 1524, 1657, 1534, 1452, 1399, 1367, 1300, 1238, 1194, 1162, 1155, 1171, 1209, 1267, 1336, 1393, 1435, 1486, 1591, 1491, 1429, 1389, 1335, 1255, 1189, 1139, 1108, 1104, 1118, 1156, 1218, 1302, 1369, 1422, 1470, 1540, 1456, 1416, 1370, 1305, 1216, 1146, 1093, 1068, 1064, 1078, 1116, 1176, 1268, 1345, 1415, 1451, 1510, 1445, 1409, 1352, 1280, 1185, 1113, 1065, 1041, 1039, 1051, 1085, 1147, 1235, 1330, 1402, 1440, 1499, 1444, 1399, 1349, 1261, 1171, 1096, 1050, 1029, 1030, 1037, 1070, 1127, 1217, 1314, 1395, 1437, 1490, 1437, 1401, 1346, 1256, 1161, 1091, 1043, 1026, 1024, 1034, 1064, 1123, 1210, 1308, 1390, 1436, 1490, 1441, 1409, 1346, 1262, 1170, 1097, 1049, 1030, 1029, 1040, 1069, 1129, 1216, 1315, 1393, 1439, 1490, 1458, 1413, 1357, 1280, 1194, 1118, 1065, 1044, 1043, 1055, 1088, 1151, 1235, 1331, 1404, 1448, 1513, 1475, 1426, 1378, 1304, 1225, 1149, 1098, 1074, 1067, 1083, 1122, 1187, 1268, 1356, 1411, 1465, 1530, 1505, 1439, 1402, 1339, 1268, 1197, 1144, 1119, 1110, 1129, 1167, 1232, 1313, 1383, 1428, 1481, 1563, 1564, 1455, 1415, 1373, 1313, 1249, 1203, 1173, 1167, 1184, 1227, 1284, 1349, 1404, 1449, 1499, 1617, 1620, 1493, 1428, 1402, 1354, 1303, 1261, 1236, 1228, 1250, 1285, 1333, 1389, 1428, 1444, 1544, 1684, 1710, 1568, 1462, 1428, 1394, 1354, 1315, 800, 1298, 1317, 1337, 1381, 1411, 1428, 1491, 1594, 1774, 1755, 1632, 1496, 1430, 1395, 1370, 1330, 1328, 1322, 1331, 1348, 1378, 1392, 1426, 1503, 1657, 1810, ]
+ gb: [1773, 1627, 1500, 1438, 1403, 1382, 1352, 1341, 1336, 1344, 1365, 1404, 1435, 1476, 1545, 1692, 1839, 1672, 1540, 1450, 1406, 1376, 1332, 1298, 1282, 1274, 1284, 1312, 1363, 1405, 1440, 1483, 1594, 1751, 1608, 1494, 1426, 1391, 1341, 1284, 1247, 1219, 1207, 1224, 1263, 1318, 1388, 1423, 1460, 1542, 1678, 1545, 1463, 1407, 1368, 1298, 1235, 1188, 1153, 1148, 1163, 1207, 1268, 1345, 1402, 1450, 1506, 1613, 1499, 1442, 1399, 1342, 1259, 1187, 1135, 1103, 1096, 1116, 1157, 1222, 1310, 1382, 1436, 1489, 1564, 1475, 1434, 1382, 1315, 1221, 1145, 1093, 1065, 1061, 1076, 1115, 1182, 1278, 1364, 1431, 1474, 1541, 1461, 1425, 1368, 1290, 1193, 1118, 1064, 1041, 1037, 1050, 1090, 1154, 1246, 1346, 1420, 1466, 1525, 1463, 1416, 1363, 1273, 1178, 1097, 1051, 1030, 1029, 1039, 1073, 1136, 1232, 1332, 1414, 1460, 1519, 1452, 1420, 1357, 1268, 1172, 1094, 1045, 1026, 1024, 1034, 1067, 1131, 1223, 1324, 1409, 1458, 1521, 1460, 1420, 1359, 1271, 1175, 1099, 1048, 1029, 1027, 1038, 1072, 1136, 1227, 1330, 1412, 1458, 1524, 1467, 1424, 1368, 1289, 1197, 1117, 1063, 1040, 1038, 1053, 1089, 1156, 1246, 1345, 1415, 1470, 1538, 1486, 1437, 1384, 1309, 1224, 1146, 1091, 1067, 1063, 1077, 1118, 1187, 1278, 1367, 1425, 1600, 1553, 1519, 1445, 1408, 1342, 1266, 1192, 1136, 1106, 1102, 1119, 1161, 1230, 1316, 1389, 1438, 1495, 1583, 1567, 1460, 1420, 1374, 1310, 1241, 1189, 1158, 1152, 1173, 1214, 1278, 1348, 1410, 1456, 1511, 1634, 1624, 1498, 1427, 1400, 1346, 1294, 1244, 1219, 1210, 1232, 1271, 1321, 1384, 1430, 1448, 1557, 1697, 1719, 1560, 1458, 1421, 1381, 1338, 1298, 1274, 1275, 1292, 1318, 1365, 1404, 1424, 1489, 1601, 1785, 1751, 1637, 1497, 1429, 1389, 1361, 1323, 1311, 1309, 1318, 1339, 1374, 1388, 1429, 1513, 1674, 1829, ]
+ b: [1800, 1643, 1486, 1416, 1376, 1354, 1329, 1318, 1309, 1310, 1331, 1359, 1390, 1444, 1533, 1708, 1846, 1664, 1510, 1400, 1351, 1324, 1286, 1260, 1246, 1235, 1244, 1266, 1306, 1341, 1373, 1441, 1556, 1734, 1557, 1441, 1360, 1322, 1282, 1242, 1211, 1188, 1180, 1186, 1220, 1258, 1309, 1346, 1391, 1475, 1626, 1484, 1400, 1331, 1300, 1247, 1202, 1163, 1135, 1127, 1143, 1170, 1215, 1274, 1315, 1365, 1417, 1555, 1422, 1368, 1316, 1270, 1209, 1158, 1117, 1088, 1084, 1094, 1130, 1174, 1240, 800, 1343, 1389, 1497, 1383, 1351, 1299, 1247, 1177, 1122, 1081, 1057, 1051, 1067, 1094, 1142, 1209, 1274, 1329, 1362, 1461, 1367, 1333, 1284, 1224, 1153, 1098, 1056, 1040, 1035, 1042, 1070, 1118, 1186, 1255, 1314, 1349, 1441, 1355, 1327, 1275, 1209, 1137, 1082, 1044, 1029, 1026, 1034, 1056, 1100, 1166, 1241, 1302, 1341, 1439, 1343, 1325, 1270, 1201, 1130, 1075, 1037, 1024, 1026, 1030, 1050, 1094, 1160, 1231, 1295, 1334, 1434, 1347, 1330, 1274, 1203, 1135, 1079, 1040, 1026, 1024, 1031, 1054, 1097, 1161, 1231, 1292, 1338, 1433, 1358, 1330, 1280, 1219, 1152, 1093, 1051, 1032, 1030, 1043, 1067, 1115, 1173, 1237, 1298, 1348, 1447, 1382, 1342, 1298, 1236, 1174, 1115, 1071, 1051, 1044, 1060, 1088, 1138, 1197, 1259, 1301, 1365, 1464, 1410, 1360, 1314, 1259, 1205, 1149, 1104, 1079, 1075, 1090, 1123, 1171, 1227, 1277, 1315, 1387, 1508, 1476, 1376, 1330, 1287, 1238, 1188, 1144, 1122, 1115, 1132, 1165, 1206, 1249, 1294, 1344, 1402, 1567, 1548, 1431, 1348, 1314, 1271, 1224, 1190, 1168, 1163, 1182, 1210, 1246, 1286, 1318, 1344, 1462, 1650, 1658, 1510, 1386, 1342, 1305, 1268, 1232, 1220, 1221, 1236, 1250, 1283, 1311, 1328, 1406, 1530, 1755, 1698, 1587, 1431, 1350, 1304, 1274, 1244, 1238, 1239, 1245, 1262, 1283, 1293, 1339, 1439, 1608, 1825, ]
+ #800x600_D50_70 - D50
+ - ct: 5003
+ resolution: 800x600
+ r: [2543, 2578, 2509, 2438, 2318, 2233, 2133, 2085, 2088, 2130, 2245, 2390, 2533, 2674, 2811, 2910, 2790, 2536, 2518, 2407, 2309, 2153, 2048, 1910, 1861, 1865, 1921, 2013, 2160, 2340, 2523, 2664, 2836, 2882, 2501, 2408, 2276, 2127, 1951, 1804, 1701, 1655, 1635, 1674, 1771, 1939, 2141, 2356, 2565, 2701, 2839, 2403, 2314, 2154, 1963, 1779, 1618, 1511, 1447, 1433, 1470, 1554, 1714, 1920, 2196, 2430, 2589, 2694, 2352, 2232, 2049, 1828, 1635, 1472, 1357, 1295, 1274, 1317, 1399, 1543, 1785, 2021, 2302, 2494, 2688, 2254, 2143, 1936, 1720, 1509, 1345, 1237, 1168, 1158, 1188, 1271, 1420, 1614, 1894, 2190, 2443, 2592, 2210, 2085, 1870, 1630, 1432, 1264, 1161, 1090, 1079, 1102, 1184, 1329, 1525, 1797, 2112, 2377, 2587, 2224, 2063, 1822, 1598, 1381, 1217, 1121, 1045, 1031, 1063, 1129, 1270, 1481, 1749, 2059, 2344, 2559, 2234, 2083, 1812, 1592, 1381, 1215, 1102, 1046, 1024, 1053, 1122, 1257, 1466, 1734, 2045, 2338, 2530, 2224, 2063, 1856, 1610, 1407, 1237, 1126, 1063, 1044, 1072, 1145, 1288, 1485, 1764, 2059, 2344, 2539, 2273, 2135, 1906, 1675, 1470, 1299, 1187, 1112, 1094, 1120, 1208, 1348, 1546, 1828, 2124, 2377, 2566, 2321, 2197, 1986, 1779, 1563, 1402, 1271, 1209, 1192, 1221, 1313, 1461, 1664, 1929, 2203, 2460, 2659, 2371, 2292, 2119, 1906, 1700, 1538, 1407, 1335, 1321, 1366, 1447, 1593, 1800, 2062, 2331, 2570, 2737, 2485, 2382, 2262, 2078, 1876, 1721, 1587, 1525, 1504, 1545, 1633, 1785, 1985, 2246, 2464, 2631, 2799, 2621, 2465, 2387, 2243, 2063, 1912, 1801, 1734, 1705, 1755, 1848, 2005, 2213, 2417, 2584, 2773, 2900, 2757, 2632, 2519, 2419, 2283, 2160, 2044, 1976, 1979, 2024, 2107, 2272, 2430, 2578, 2731, 2921, 2984, 2724, 2762, 2663, 2570, 2413, 2331, 2245, 2227, 2242, 2278, 2369, 2486, 2647, 2763, 2864, 3041, 2860, ]
+ gr: [2123, 2151, 2065, 2008, 1917, 1836, 1766, 1738, 1740, 1752, 1817, 1882, 1943, 2023, 2110, 2206, 2123, 2143, 2093, 2006, 1915, 1810, 1724, 1632, 1597, 1588, 1608, 1665, 1733, 1827, 1928, 2014, 2122, 2189, 2104, 2052, 1936, 1805, 1686, 1575, 1502, 1464, 1446, 1461, 1512, 1597, 1705, 1827, 1949, 2027, 2124, 2066, 1962, 1856, 1704, 1563, 1450, 1376, 1323, 1310, 1323, 1371, 1466, 1570, 1714, 1868, 1954, 2066, 1997, 1917, 1771, 1622, 1466, 1351, 1258, 1217, 1199, 1211, 1265, 1351, 1469, 1622, 1781, 1891, 1989, 1958, 1863, 1700, 1537, 1382, 1265, 1182, 1133, 1118, 1128, 1178, 1254, 1385, 1537, 1695, 1838, 1943, 1935, 1829, 1642, 1480, 1319, 1202, 1122, 1078, 1061, 1073, 1114, 1196, 1316, 1477, 1655, 1806, 1913, 1953, 1794, 1639, 1442, 1288, 1171, 1089, 1047, 1031, 1044, 1083, 1153, 1279, 1436, 1623, 1783, 1924, 1940, 1807, 1621, 1442, 1283, 1166, 1083, 1041, 1024, 1034, 1073, 1147, 1270, 1436, 1608, 1768, 1897, 1968, 1828, 1639, 1470, 1297, 1182, 1096, 1055, 1038, 1050, 1090, 1168, 1290, 1442, 1627, 1783, 1917, 1942, 1841, 1682, 1510, 1349, 1222, 1132, 1088, 1067, 1081, 1127, 1206, 1326, 1486, 1651, 1811, 1942, 2005, 1901, 1743, 1578, 1422, 1303, 1209, 1152, 1135, 1148, 1191, 1280, 1399, 1548, 1719, 1845, 1974, 2057, 1952, 1830, 1685, 1512, 1393, 1305, 1245, 1221, 1233, 1289, 1372, 1489, 1634, 1776, 1904, 2031, 2113, 2007, 1918, 1777, 1640, 1511, 1423, 1360, 1344, 1360, 1400, 1494, 1608, 1742, 1862, 1976, 2123, 2199, 2104, 2006, 1879, 1756, 1649, 1553, 1502, 1480, 1495, 1546, 1633, 1732, 1839, 1956, 2052, 2210, 2300, 2191, 2104, 2010, 1907, 1802, 1717, 1669, 1655, 1673, 1717, 1792, 1878, 1955, 2054, 2222, 2274, 2310, 2336, 2195, 2103, 2012, 1925, 1861, 1823, 1814, 1844, 1889, 1931, 2004, 2079, 2166, 2287, 2213, ]
+ gb: [2166, 2183, 2106, 2056, 1961, 1889, 1800, 1772, 1760, 1791, 1821, 1907, 1948, 2040, 2115, 2205, 2191, 2197, 2125, 2062, 1973, 1862, 1758, 1680, 1620, 1612, 1636, 1693, 1758, 1851, 1953, 2031, 2125, 2174, 2125, 2067, 1974, 1852, 1719, 1621, 1532, 1477, 1465, 1480, 1535, 1605, 1724, 1852, 1967, 2050, 2156, 2107, 2015, 1893, 1738, 1608, 1485, 1406, 1337, 1319, 1337, 1382, 1476, 1589, 1733, 1869, 1985, 2070, 2037, 1948, 1806, 1641, 1501, 1377, 1287, 1227, 1215, 1227, 1274, 1364, 1485, 1645, 1806, 1928, 2028, 1981, 1887, 1728, 1564, 1409, 1285, 1199, 1145, 1125, 1135, 1183, 1270, 1395, 1560, 1733, 1868, 1974, 1965, 1841, 1670, 1509, 1349, 1221, 1138, 1084, 1065, 1073, 1121, 1208, 1332, 1496, 1670, 1835, 1958, 1948, 1818, 1642, 1467, 1315, 1185, 1099, 1052, 1035, 1042, 1084, 1163, 1292, 1458, 1638, 1812, 1948, 1942, 1809, 1635, 1467, 1296, 1178, 1094, 1039, 1024, 1038, 1073, 1157, 1285, 1451, 1640, 1803, 1935, 1948, 1812, 1646, 1483, 1317, 1196, 1107, 1057, 1043, 1053, 1090, 1183, 1296, 1464, 1650, 1818, 1941, 1965, 1841, 1687, 1519, 1362, 1243, 1145, 1094, 1075, 1088, 1137, 1225, 1339, 1512, 1692, 1835, 1988, 1981, 1893, 1738, 1586, 1435, 1314, 1218, 1160, 1143, 1158, 1212, 1294, 1418, 1578, 1742, 1887, 2005, 2037, 1948, 1838, 1674, 1527, 1398, 1309, 1251, 1236, 1253, 1305, 1385, 1514, 1674, 1816, 1934, 2062, 2098, 2015, 1899, 1791, 1656, 1530, 1430, 1379, 1360, 1379, 1428, 1517, 1639, 1781, 1893, 2015, 2117, 2199, 2075, 1988, 1910, 1776, 1664, 1583, 1518, 1502, 1525, 1576, 1668, 1776, 1898, 1981, 2084, 2221, 2269, 2204, 2103, 2021, 1921, 1827, 1751, 1676, 1671, 1693, 1755, 1843, 1927, 2007, 2095, 2224, 2294, 2285, 2285, 2190, 2112, 2009, 1956, 1909, 1853, 1845, 1864, 1921, 1995, 2058, 2137, 2199, 2308, 2231, ]
+ b: [2007, 2014, 1951, 1922, 1856, 1794, 1746, 1720, 1718, 1747, 1818, 1865, 1956, 2026, 2146, 2219, 2251, 2020, 1954, 1914, 1840, 1745, 1673, 1626, 1592, 1586, 1613, 1674, 1732, 1851, 1938, 2030, 2131, 2207, 1927, 1878, 1807, 1732, 1628, 1548, 1486, 1461, 1440, 1465, 1519, 1601, 1715, 1846, 1943, 2018, 2141, 1863, 1826, 1730, 1633, 1515, 1436, 1369, 1326, 1318, 1337, 1399, 1479, 1598, 1729, 1865, 1962, 2051, 1840, 1751, 1653, 1541, 1426, 1333, 1265, 1217, 1214, 1223, 1281, 1373, 1493, 1641, 1794, 1908, 2015, 1803, 1695, 1587, 1462, 1347, 1245, 1173, 1139, 1122, 1139, 1197, 1288, 1404, 1555, 1712, 1845, 1987, 1781, 1659, 1544, 1402, 1284, 1186, 1117, 1075, 1065, 1088, 1131, 1214, 1342, 1504, 1667, 1808, 1945, 1753, 1639, 1509, 1376, 1253, 1152, 1083, 1045, 1040, 1051, 1094, 1177, 1307, 1464, 1630, 1782, 1939, 1752, 1626, 1510, 1370, 1248, 1141, 1076, 1037, 1024, 1043, 1087, 1163, 1299, 1452, 1631, 1789, 1927, 1761, 1639, 1509, 1384, 1259, 1157, 1088, 1049, 1036, 1061, 1103, 1190, 1321, 1469, 1648, 1806, 1939, 1772, 1673, 1550, 1423, 1304, 1194, 1124, 1088, 1073, 1094, 1143, 1231, 1353, 1508, 1673, 1816, 1955, 1794, 1709, 1599, 1495, 1373, 1269, 1191, 1149, 1129, 1159, 1210, 1298, 1429, 1571, 1726, 1854, 2010, 1840, 1759, 1679, 1567, 1448, 1358, 1284, 1234, 1228, 1249, 1306, 1392, 1507, 1647, 1794, 1917, 2076, 1929, 1835, 1760, 1670, 1565, 1470, 1388, 1351, 1335, 1362, 1423, 1511, 1609, 1743, 1865, 1983, 2145, 2028, 1898, 1841, 1761, 1670, 1590, 1519, 1483, 1475, 1505, 1563, 1640, 1749, 1862, 1943, 2078, 2218, 2109, 2014, 1944, 1883, 1812, 1745, 1674, 1630, 1635, 1665, 1717, 1801, 1884, 1967, 2064, 2188, 2295, 2157, 2126, 2020, 1952, 1891, 1833, 1781, 1761, 1773, 1803, 1857, 1943, 2005, 2026, 2159, 2268, 2251, ]
+
+...
diff --git a/src/ipa/rkisp1/data/ov4689.yaml b/src/ipa/rkisp1/data/ov4689.yaml
new file mode 100644
index 00000000..2068684c
--- /dev/null
+++ b/src/ipa/rkisp1/data/ov4689.yaml
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: CC0-1.0
+%YAML 1.1
+---
+version: 1
+algorithms:
+ - Agc:
+ - Awb:
+ - BlackLevelCorrection:
+ R: 66
+ Gr: 66
+ Gb: 66
+ B: 66
+...
diff --git a/src/ipa/rkisp1/data/ov5640.yaml b/src/ipa/rkisp1/data/ov5640.yaml
index 232d8ae8..897b83cb 100644
--- a/src/ipa/rkisp1/data/ov5640.yaml
+++ b/src/ipa/rkisp1/data/ov5640.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: CC0-1.0
-%YAML 1.2
+%YAML 1.1
---
version: 1
algorithms:
@@ -10,4 +10,245 @@ algorithms:
Gr: 256
Gb: 256
B: 256
+ - ColorProcessing:
+ - GammaSensorLinearization:
+ x-intervals: [ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 ]
+ y:
+ red: [ 0, 256, 512, 768, 1024, 1280, 1536, 1792, 2048, 2304, 2560, 2816, 3072, 3328, 3584, 3840, 4095 ]
+ green: [ 0, 256, 512, 768, 1024, 1280, 1536, 1792, 2048, 2304, 2560, 2816, 3072, 3328, 3584, 3840, 4095 ]
+ blue: [ 0, 256, 512, 768, 1024, 1280, 1536, 1792, 2048, 2304, 2560, 2816, 3072, 3328, 3584, 3840, 4095 ]
+ - LensShadingCorrection:
+ x-size: [ 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625 ]
+ y-size: [ 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625 ]
+ sets:
+ - ct: 3000
+ r: [
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ ]
+ gr: [
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ ]
+ gb: [
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ ]
+ b: [
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ ]
+ - ct: 7000
+ r: [
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ ]
+ gr: [
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ ]
+ gb: [
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ ]
+ b: [
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ ]
+ - DefectPixelClusterCorrection:
+ fixed-set: false
+ sets:
+ # PG, LC, RO, RND, RG
+ - line-threshold:
+ green: 8
+ red-blue: 8
+ line-mad-factor:
+ green: 4
+ red-blue: 4
+ pg-factor:
+ green: 8
+ red-blue: 8
+ rnd-threshold:
+ green: 10
+ red-blue: 10
+ rg-factor:
+ green: 32
+ red-blue: 32
+ ro-limits:
+ green: 1
+ red-blue: 1
+ rnd-offsets:
+ green: 2
+ red-blue: 2
+ # PG, LC, RO
+ - line-threshold:
+ green: 24
+ red-blue: 32
+ line-mad-factor:
+ green: 16
+ red-blue: 24
+ pg-factor:
+ green: 6
+ red-blue: 8
+ ro-limits:
+ green: 2
+ red-blue: 2
+ # PG, LC, RO, RND, RG
+ - line-threshold:
+ green: 32
+ red-blue: 32
+ line-mad-factor:
+ green: 4
+ red-blue: 4
+ pg-factor:
+ green: 10
+ red-blue: 10
+ rnd-threshold:
+ green: 6
+ red-blue: 8
+ rg-factor:
+ green: 4
+ red-blue: 4
+ ro-limits:
+ green: 1
+ red-blue: 2
+ rnd-offsets:
+ green: 2
+ red-blue: 2
+ - Dpf:
+ DomainFilter:
+ g: [ 16, 16, 16, 16, 16, 16 ]
+ rb: [ 16, 16, 16, 16, 16, 16 ]
+ NoiseLevelFunction:
+ coeff: [
+ 1023, 1023, 1023, 1023, 1023, 1023, 1023, 1023,
+ 1023, 1023, 1023, 1023, 1023, 1023, 1023, 1023,
+ 1023
+ ]
+ scale-mode: "linear"
+ FilterStrength:
+ r: 64
+ g: 64
+ b: 64
+ - Filter:
...
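
The Dpf block closed above bundles three small tables: a six-tap DomainFilter per green and red/blue plane, a seventeen-entry NoiseLevelFunction curve with its scale-mode, and per-channel FilterStrength values. As a reading aid only, here is a minimal Python sketch (PyYAML assumed; load_dpf() and the 'sensor.yaml' path are hypothetical names, and this is not libcamera's own parser) that pulls the block out of such a tuning file and checks the array lengths seen above:

    # Minimal sketch: extract and sanity-check the Dpf block of an
    # rkisp1 tuning file. Assumes PyYAML; not libcamera code.
    import yaml

    def load_dpf(path):
        with open(path) as f:
            doc = yaml.safe_load(f)
        # 'algorithms' is a list of single-key mappings such as {'Dpf': {...}}.
        dpf = next((e['Dpf'] for e in doc['algorithms'] if 'Dpf' in e), None)
        if dpf is None:
            raise ValueError('no Dpf block in tuning file')

        dom = dpf['DomainFilter']
        assert len(dom['g']) == 6 and len(dom['rb']) == 6    # six taps per plane above
        assert len(dpf['NoiseLevelFunction']['coeff']) == 17  # 17-point NLF curve above
        return dom, dpf['NoiseLevelFunction'], dpf['FilterStrength']

    dom, nlf, strength = load_dpf('sensor.yaml')  # placeholder path

The lengths asserted here simply mirror the tables in the file; whether other lengths are accepted is left to the IPA's own validation.
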
diff --git a/src/ipa/rkisp1/data/ov5695.yaml b/src/ipa/rkisp1/data/ov5695.yaml
new file mode 100644
index 00000000..2e39e3a5
--- /dev/null
+++ b/src/ipa/rkisp1/data/ov5695.yaml
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: CC0-1.0
+%YAML 1.1
+---
+version: 1
+algorithms:
+ - Agc:
+ - Awb:
+ - LensShadingCorrection:
+ x-size: [ 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625 ]
+ y-size: [ 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625 ]
+ sets:
+ #2592x1944_A_70 - A
+ - ct: 2856
+ resolution: 2592x1944
+ r: [2312, 2874, 2965, 2789, 2603, 2424, 2288, 2176, 2151, 2176, 2240, 2345, 2520, 2736, 2856, 2825, 2272, 2675, 3026, 2925, 2693, 2443, 2247, 2074, 1992, 1947, 1972, 2066, 2211, 2386, 2618, 2847, 2953, 2698, 2927, 3008, 2846, 2541, 2272, 2037, 1867, 1782, 1740, 1762, 1855, 1981, 2198, 2454, 2711, 2963, 2927, 2974, 2920, 2664, 2337, 2061, 1822, 1648, 1550, 1503, 1550, 1648, 1794, 1982, 2257, 2565, 2805, 2880, 2933, 2799, 2472, 2161, 1880, 1631, 1457, 1361, 1328, 1364, 1448, 1602, 1817, 2087, 2390, 2698, 2911, 2947, 2734, 2404, 2061, 1759, 1525, 1340, 1244, 1209, 1240, 1343, 1473, 1701, 1975, 2278, 2641, 2823, 2948, 2680, 2342, 1979, 1667, 1425, 1259, 1159, 1125, 1159, 1238, 1407, 1633, 1914, 2235, 2592, 2866, 2936, 2661, 2276, 1908, 1624, 1368, 1190, 1097, 1058, 1086, 1178, 1341, 1556, 1848, 2175, 2509, 2763, 2873, 2603, 2230, 1868, 1578, 1320, 1157, 1058, 1024, 1053, 1142, 1302, 1521, 1789, 2125, 2471, 2760, 2896, 2661, 2276, 1914, 1591, 1349, 1176, 1083, 1044, 1080, 1166, 1327, 1544, 1814, 2141, 2509, 2763, 2969, 2710, 2342, 1985, 1676, 1431, 1250, 1146, 1105, 1140, 1234, 1392, 1616, 1895, 2235, 2578, 2847, 3060, 2800, 2426, 2076, 1764, 1518, 1335, 1227, 1197, 1227, 1314, 1486, 1696, 1989, 2298, 2641, 2863, 2978, 2853, 2496, 2169, 1880, 1631, 1457, 1345, 1304, 1334, 1429, 1586, 1811, 2064, 2378, 2698, 2867, 3024, 2960, 2664, 2327, 2054, 1811, 1626, 1517, 1490, 1514, 1597, 1763, 1962, 2229, 2538, 2768, 2926, 3032, 3077, 2864, 2554, 2272, 2052, 1861, 1747, 1716, 1742, 1816, 1995, 2190, 2454, 2727, 2920, 2927, 2849, 3155, 3008, 2772, 2490, 2276, 2121, 2006, 1954, 1978, 2066, 2202, 2408, 2648, 2847, 2977, 2797, 2440, 3116, 3132, 2900, 2738, 2509, 2329, 2239, 2194, 2230, 2298, 2436, 2617, 2825, 2965, 2899, 2312, ]
+ gr: [1557, 1922, 2004, 1947, 1841, 1757, 1689, 1651, 1631, 1647, 1680, 1737, 1835, 1911, 1995, 1941, 1613, 1820, 2038, 1996, 1900, 1779, 1692, 1617, 1565, 1549, 1554, 1594, 1670, 1753, 1875, 1957, 2029, 1848, 2009, 2064, 1956, 1834, 1715, 1601, 1518, 1474, 1446, 1459, 1505, 1582, 1666, 1796, 1935, 2029, 2009, 2013, 2006, 1874, 1731, 1602, 1493, 1409, 1346, 1332, 1348, 1395, 1474, 1576, 1689, 1843, 1944, 2003, 1982, 1931, 1783, 1637, 1496, 1386, 1297, 1238, 1219, 1239, 1284, 1370, 1474, 1601, 1747, 1897, 2000, 1998, 1920, 1755, 1587, 1455, 1325, 1228, 1171, 1159, 1176, 1223, 1311, 1418, 1565, 1707, 1855, 1990, 2007, 1897, 1733, 1574, 1423, 1296, 1183, 1121, 1101, 1132, 1182, 1277, 1396, 1539, 1696, 1866, 1990, 2000, 1870, 1692, 1529, 1377, 1239, 1141, 1077, 1057, 1079, 1141, 1230, 1350, 1493, 1640, 1810, 1961, 1957, 1849, 1669, 1496, 1356, 1212, 1112, 1053, 1024, 1049, 1106, 1203, 1322, 1465, 1615, 1780, 1919, 1969, 1870, 1675, 1515, 1365, 1232, 1128, 1063, 1042, 1068, 1123, 1220, 1345, 1483, 1628, 1788, 1945, 2007, 1917, 1728, 1574, 1420, 1285, 1173, 1115, 1088, 1109, 1170, 1268, 1388, 1532, 1678, 1835, 1999, 2033, 1927, 1760, 1613, 1461, 1334, 1234, 1175, 1145, 1168, 1225, 1311, 1423, 1557, 1726, 1874, 2015, 2000, 1960, 1810, 1641, 1515, 1391, 1292, 1228, 1212, 1232, 1275, 1358, 1462, 1601, 1737, 1883, 1974, 2032, 2006, 1874, 1712, 1594, 1477, 1395, 1329, 1316, 1327, 1375, 1453, 1547, 1671, 1808, 1937, 1994, 2039, 2064, 1971, 1829, 1701, 1608, 1521, 1465, 1441, 1462, 1498, 1571, 1666, 1785, 1921, 2003, 2039, 1886, 2087, 2062, 1926, 1817, 1706, 1637, 1572, 1560, 1572, 1613, 1688, 1774, 1868, 1973, 2029, 1886, 1692, 2020, 2067, 2008, 1897, 1822, 1741, 1704, 1683, 1695, 1727, 1783, 1872, 1977, 2022, 1989, 1639, ]
+ gb: [1553, 1926, 1992, 1930, 1852, 1746, 1675, 1630, 1611, 1622, 1671, 1726, 1804, 1915, 1992, 1955, 1584, 1852, 2043, 2001, 1879, 1773, 1674, 1602, 1548, 1532, 1541, 1583, 1661, 1752, 1867, 1986, 2034, 1881, 1993, 2060, 1976, 1811, 1697, 1590, 1505, 1459, 1439, 1453, 1496, 1579, 1674, 1795, 1940, 2051, 2034, 2018, 2003, 1866, 1735, 1594, 1478, 1396, 1339, 1326, 1339, 1388, 1463, 1579, 1707, 1842, 1980, 2037, 2014, 1950, 1793, 1641, 1509, 1384, 1291, 1229, 1209, 1231, 1283, 1369, 1481, 1625, 1751, 1901, 2023, 2029, 1925, 1750, 1602, 1458, 1330, 1228, 1162, 1144, 1166, 1218, 1308, 1433, 1572, 1730, 1872, 2029, 2020, 1934, 1752, 1578, 1429, 1288, 1181, 1116, 1102, 1130, 1184, 1278, 1400, 1546, 1700, 1870, 2020, 2030, 1899, 1706, 1536, 1388, 1239, 1137, 1074, 1053, 1078, 1134, 1235, 1358, 1509, 1661, 1838, 1989, 1985, 1853, 1682, 1522, 1356, 1209, 1114, 1050, 1024, 1046, 1106, 1206, 1335, 1478, 1623, 1801, 1954, 2005, 1887, 1706, 1536, 1383, 1235, 1131, 1063, 1045, 1059, 1120, 1225, 1356, 1493, 1666, 1815, 1981, 2063, 1948, 1767, 1589, 1438, 1293, 1183, 1116, 1093, 1115, 1174, 1272, 1400, 1546, 1695, 1877, 2012, 2055, 1952, 1795, 1633, 1476, 1347, 1235, 1167, 1146, 1160, 1230, 1323, 1435, 1579, 1730, 1898, 2046, 2059, 1972, 1843, 1666, 1519, 1402, 1291, 1231, 1209, 1233, 1283, 1366, 1481, 1613, 1767, 1922, 2023, 2066, 2036, 1903, 1740, 1609, 1484, 1399, 1337, 1317, 1330, 1378, 1451, 1572, 1689, 1830, 1964, 2037, 2034, 2097, 2005, 1856, 1724, 1608, 1521, 1471, 1450, 1456, 1505, 1593, 1688, 1805, 1940, 2051, 2045, 1974, 2123, 2067, 1958, 1827, 1719, 1633, 1580, 1563, 1576, 1609, 1688, 1783, 1892, 2009, 2053, 1911, 1652, 2078, 2101, 2021, 1915, 1837, 1731, 1682, 1661, 1686, 1717, 1782, 1864, 1982, 2036, 2005, 1669, ]
+ b: [1439, 1756, 1796, 1808, 1716, 1631, 1568, 1537, 1530, 1546, 1578, 1608, 1676, 1744, 1796, 1756, 1456, 1685, 1858, 1830, 1764, 1687, 1603, 1529, 1486, 1489, 1486, 1493, 1552, 1628, 1721, 1812, 1858, 1727, 1837, 1888, 1825, 1726, 1628, 1548, 1478, 1449, 1423, 1434, 1462, 1521, 1566, 1688, 1809, 1888, 1837, 1889, 1857, 1775, 1680, 1576, 1467, 1403, 1336, 1309, 1329, 1369, 1429, 1529, 1623, 1733, 1822, 1868, 1852, 1828, 1704, 1585, 1486, 1377, 1285, 1237, 1216, 1232, 1268, 1344, 1438, 1536, 1667, 1764, 1813, 1853, 1815, 1675, 1576, 1436, 1333, 1226, 1158, 1145, 1158, 1216, 1298, 1407, 1503, 1640, 1754, 1816, 1908, 1800, 1691, 1536, 1422, 1296, 1188, 1114, 1095, 1114, 1174, 1268, 1388, 1485, 1623, 1742, 1851, 1865, 1783, 1646, 1513, 1378, 1236, 1124, 1071, 1050, 1074, 1132, 1211, 1333, 1463, 1603, 1713, 1829, 1822, 1736, 1621, 1486, 1358, 1211, 1109, 1040, 1024, 1037, 1101, 1197, 1314, 1423, 1559, 1683, 1788, 1829, 1769, 1635, 1513, 1371, 1231, 1128, 1057, 1033, 1057, 1112, 1202, 1327, 1455, 1572, 1700, 1794, 1870, 1831, 1679, 1554, 1430, 1290, 1170, 1103, 1091, 1107, 1165, 1263, 1374, 1501, 1623, 1742, 1833, 1911, 1863, 1724, 1586, 1459, 1352, 1236, 1171, 1153, 1171, 1221, 1315, 1414, 1520, 1663, 1799, 1872, 1913, 1861, 1730, 1626, 1511, 1397, 1296, 1242, 1221, 1227, 1279, 1350, 1446, 1555, 1691, 1779, 1852, 1934, 1893, 1804, 1703, 1576, 1475, 1396, 1329, 1309, 1336, 1363, 1437, 1538, 1634, 1747, 1839, 1868, 1955, 1991, 1910, 1808, 1696, 1596, 1537, 1472, 1445, 1457, 1494, 1539, 1617, 1739, 1825, 1928, 1860, 1818, 2015, 1981, 1906, 1778, 1680, 1627, 1585, 1551, 1566, 1596, 1646, 1725, 1824, 1902, 1945, 1794, 1571, 1937, 1977, 1932, 1866, 1784, 1714, 1674, 1642, 1662, 1678, 1730, 1788, 1859, 1913, 1912, 1592, ]
+ #2592x1944_D65_70 - D65
+ - ct: 6504
+ resolution: 2592x1944
+ r: [2457, 2985, 2981, 2763, 2587, 2383, 2222, 2123, 2089, 2123, 2167, 2270, 2466, 2638, 2823, 2805, 2457, 2770, 3097, 2893, 2640, 2410, 2169, 2039, 1933, 1908, 1914, 1973, 2117, 2295, 2514, 2728, 2953, 2735, 3009, 2991, 2771, 2467, 2201, 1985, 1825, 1726, 1679, 1703, 1791, 1924, 2085, 2345, 2583, 2806, 2898, 3015, 2906, 2586, 2267, 2005, 1790, 1629, 1527, 1488, 1505, 1597, 1734, 1923, 2169, 2447, 2714, 2876, 2953, 2756, 2435, 2120, 1832, 1617, 1462, 1359, 1326, 1351, 1423, 1573, 1774, 2014, 2285, 2612, 2857, 2963, 2676, 2324, 2016, 1735, 1499, 1334, 1234, 1201, 1227, 1313, 1452, 1649, 1893, 2177, 2503, 2754, 2883, 2582, 2252, 1912, 1634, 1401, 1236, 1144, 1106, 1135, 1215, 1365, 1570, 1804, 2091, 2443, 2715, 2839, 2555, 2196, 1860, 1576, 1346, 1180, 1084, 1046, 1077, 1161, 1305, 1501, 1767, 2056, 2384, 2678, 2797, 2546, 2165, 1832, 1546, 1314, 1150, 1060, 1024, 1046, 1133, 1275, 1474, 1726, 2030, 2378, 2667, 2811, 2555, 2169, 1843, 1564, 1321, 1161, 1069, 1032, 1057, 1146, 1289, 1496, 1751, 2021, 2350, 2653, 2883, 2603, 2195, 1884, 1614, 1388, 1219, 1116, 1077, 1107, 1196, 1335, 1529, 1787, 2079, 2406, 2689, 2900, 2630, 2293, 1963, 1677, 1462, 1294, 1194, 1157, 1181, 1274, 1403, 1622, 1847, 2163, 2464, 2727, 2920, 2731, 2400, 2071, 1798, 1567, 1404, 1301, 1264, 1293, 1376, 1514, 1711, 1949, 2224, 2568, 2767, 3015, 2820, 2545, 2196, 1933, 1719, 1554, 1452, 1422, 1442, 1525, 1661, 1847, 2078, 2358, 2639, 2780, 2971, 2927, 2674, 2396, 2110, 1904, 1767, 1654, 1611, 1627, 1720, 1848, 2026, 2250, 2540, 2722, 2863, 2842, 3023, 2864, 2576, 2311, 2105, 1952, 1857, 1808, 1830, 1912, 2033, 2205, 2417, 2652, 2822, 2667, 2489, 3024, 2981, 2737, 2546, 2317, 2180, 2086, 2041, 2050, 2140, 2255, 2391, 2615, 2735, 2840, 2366, ]
+ gr: [1766, 2092, 2109, 2006, 1875, 1775, 1707, 1659, 1633, 1646, 1679, 1754, 1844, 1954, 2045, 2041, 1740, 1981, 2142, 2048, 1911, 1779, 1678, 1597, 1549, 1529, 1539, 1570, 1630, 1728, 1848, 1970, 2064, 1971, 2109, 2107, 1982, 1820, 1673, 1563, 1494, 1442, 1423, 1433, 1472, 1538, 1630, 1751, 1899, 2019, 2058, 2121, 2066, 1892, 1719, 1584, 1472, 1386, 1331, 1311, 1326, 1370, 1441, 1533, 1673, 1820, 1956, 2062, 2080, 1982, 1807, 1636, 1493, 1379, 1293, 1236, 1213, 1230, 1280, 1353, 1458, 1580, 1729, 1885, 2017, 2074, 1934, 1756, 1584, 1435, 1318, 1220, 1163, 1142, 1154, 1207, 1280, 1393, 1522, 1666, 1844, 1990, 2041, 1886, 1711, 1535, 1392, 1269, 1165, 1106, 1086, 1103, 1151, 1240, 1356, 1479, 1635, 1802, 1969, 2006, 1856, 1673, 1506, 1359, 1220, 1131, 1067, 1041, 1056, 1113, 1201, 1312, 1446, 1594, 1771, 1937, 2000, 1841, 1654, 1489, 1334, 1201, 1105, 1046, 1024, 1038, 1096, 1183, 1299, 1428, 1577, 1746, 1925, 2006, 1850, 1656, 1490, 1339, 1210, 1112, 1054, 1028, 1044, 1098, 1188, 1296, 1431, 1574, 1754, 1923, 2033, 1868, 1692, 1518, 1366, 1242, 1143, 1085, 1060, 1074, 1133, 1214, 1329, 1460, 1602, 1780, 1938, 2040, 1900, 1722, 1547, 1409, 1291, 1192, 1131, 1107, 1125, 1174, 1258, 1363, 1488, 1644, 1813, 1958, 2052, 1939, 1770, 1592, 1461, 1346, 1254, 1192, 1174, 1186, 1236, 1312, 1410, 1535, 1690, 1846, 1975, 2071, 1986, 1843, 1664, 1533, 1424, 1338, 1280, 1256, 1269, 1309, 1387, 1475, 1596, 1753, 1898, 2006, 2058, 2045, 1906, 1756, 1622, 1517, 1432, 1380, 1363, 1372, 1412, 1480, 1566, 1691, 1835, 1955, 2008, 1971, 2083, 2008, 1842, 1718, 1606, 1530, 1488, 1463, 1468, 1506, 1574, 1675, 1772, 1904, 1992, 1922, 1748, 2103, 2063, 1961, 1838, 1724, 1648, 1600, 1596, 1592, 1627, 1690, 1780, 1890, 1969, 1992, 1713, ]
+ gb: [1749, 2093, 2072, 1983, 1869, 1765, 1684, 1638, 1621, 1629, 1666, 1734, 1838, 1925, 2019, 2021, 1722, 1981, 2142, 2048, 1904, 1774, 1660, 1582, 1535, 1512, 1528, 1563, 1626, 1728, 1854, 1970, 2064, 1961, 2088, 2107, 1975, 1809, 1668, 1556, 1481, 1424, 1406, 1421, 1456, 1528, 1626, 1761, 1886, 2028, 2068, 2111, 2049, 1873, 1715, 1569, 1465, 1376, 1323, 1300, 1321, 1363, 1432, 1536, 1660, 1808, 1956, 2062, 2089, 1975, 1797, 1632, 1493, 1374, 1284, 1228, 1205, 1226, 1273, 1351, 1449, 1577, 1729, 1898, 2035, 2083, 1934, 1751, 1584, 1441, 1307, 1214, 1156, 1134, 1153, 1203, 1280, 1393, 1526, 1675, 1844, 1998, 2049, 1905, 1702, 1535, 1390, 1265, 1160, 1103, 1078, 1100, 1150, 1238, 1351, 1485, 1631, 1814, 1984, 2014, 1868, 1678, 1506, 1356, 1218, 1123, 1065, 1039, 1055, 1112, 1201, 1317, 1446, 1602, 1782, 1952, 2008, 1853, 1658, 1496, 1344, 1203, 1110, 1046, 1024, 1037, 1091, 1179, 1292, 1428, 1588, 1757, 1947, 2030, 1856, 1660, 1493, 1346, 1212, 1116, 1049, 1024, 1040, 1093, 1190, 1303, 1440, 1590, 1760, 1937, 2041, 1886, 1688, 1522, 1376, 1240, 1146, 1083, 1057, 1074, 1131, 1218, 1331, 1466, 1614, 1785, 1953, 2066, 1920, 1737, 1558, 1415, 1289, 1186, 1130, 1110, 1123, 1172, 1254, 1368, 1492, 1644, 1814, 1974, 2080, 1953, 1775, 1612, 1461, 1343, 1254, 1194, 1174, 1186, 1236, 1309, 1413, 1528, 1695, 1852, 1983, 2081, 2009, 1837, 1678, 1543, 1424, 1338, 1278, 1254, 1273, 1306, 1390, 1485, 1604, 1758, 1905, 2016, 2078, 2062, 1926, 1777, 1626, 1517, 1441, 1388, 1363, 1367, 1412, 1487, 1574, 1686, 1835, 1962, 2018, 1981, 2112, 2016, 1848, 1733, 1614, 1541, 1488, 1469, 1468, 1520, 1570, 1666, 1789, 1911, 1992, 1913, 1776, 2082, 2072, 1968, 1856, 1739, 1657, 1600, 1577, 1592, 1627, 1695, 1786, 1883, 1977, 2002, 1722, ]
+ b: [1681, 1945, 1998, 1882, 1777, 1699, 1617, 1588, 1571, 1554, 1581, 1644, 1729, 1797, 1905, 1919, 1646, 1868, 2012, 1964, 1828, 1711, 1617, 1535, 1492, 1479, 1478, 1509, 1559, 1636, 1737, 1860, 1925, 1830, 1961, 2001, 1890, 1754, 1638, 1529, 1463, 1407, 1389, 1407, 1432, 1485, 1574, 1668, 1790, 1898, 1922, 1995, 1962, 1813, 1680, 1557, 1453, 1378, 1319, 1297, 1302, 1348, 1418, 1505, 1605, 1726, 1868, 1944, 2004, 1901, 1765, 1611, 1482, 1375, 1287, 1230, 1207, 1224, 1259, 1338, 1420, 1528, 1664, 1807, 1921, 1969, 1858, 1708, 1557, 1434, 1317, 1217, 1161, 1142, 1156, 1206, 1275, 1369, 1481, 1598, 1764, 1880, 1973, 1821, 1664, 1516, 1392, 1270, 1165, 1106, 1085, 1095, 1152, 1231, 1336, 1445, 1567, 1725, 1856, 1947, 1804, 1647, 1495, 1359, 1230, 1136, 1067, 1043, 1060, 1115, 1197, 1299, 1419, 1548, 1695, 1834, 1924, 1787, 1623, 1478, 1346, 1212, 1114, 1052, 1024, 1044, 1094, 1172, 1287, 1408, 1532, 1681, 1853, 1925, 1804, 1641, 1481, 1351, 1225, 1124, 1056, 1032, 1046, 1099, 1181, 1296, 1410, 1531, 1688, 1806, 1951, 1821, 1664, 1516, 1377, 1255, 1150, 1089, 1066, 1082, 1128, 1214, 1315, 1432, 1562, 1709, 1856, 1957, 1840, 1688, 1546, 1413, 1297, 1190, 1139, 1116, 1130, 1179, 1259, 1347, 1462, 1592, 1740, 1859, 1968, 1881, 1728, 1588, 1460, 1345, 1265, 1199, 1180, 1191, 1241, 1307, 1391, 1498, 1644, 1773, 1876, 2008, 1940, 1789, 1654, 1531, 1427, 1341, 1286, 1265, 1273, 1316, 1370, 1471, 1569, 1696, 1830, 1896, 2002, 1977, 1871, 1732, 1620, 1519, 1432, 1387, 1362, 1364, 1402, 1466, 1535, 1654, 1782, 1877, 1896, 1895, 2025, 1975, 1828, 1704, 1599, 1540, 1478, 1456, 1459, 1499, 1548, 1636, 1737, 1841, 1925, 1830, 1705, 2013, 2036, 1912, 1785, 1720, 1636, 1588, 1565, 1576, 1599, 1664, 1722, 1815, 1905, 1945, 1681, ]
+ #2592x1944_F2_CWF_70 - F2_CWF
+ - ct: 4230
+ resolution: 2592x1944
+ r: [2512, 2860, 2753, 2554, 2376, 2198, 2033, 1949, 1924, 1921, 2012, 2100, 2257, 2461, 2682, 2775, 2436, 2753, 2915, 2713, 2415, 2193, 2004, 1869, 1790, 1755, 1774, 1844, 1945, 2108, 2306, 2547, 2755, 2697, 2849, 2810, 2526, 2247, 2018, 1821, 1692, 1608, 1577, 1591, 1653, 1775, 1921, 2132, 2371, 2625, 2765, 2881, 2679, 2376, 2077, 1853, 1677, 1542, 1449, 1412, 1430, 1511, 1615, 1781, 1983, 2258, 2517, 2722, 2832, 2589, 2237, 1977, 1718, 1527, 1403, 1319, 1290, 1307, 1370, 1491, 1658, 1850, 2112, 2408, 2708, 2718, 2474, 2154, 1861, 1616, 1439, 1293, 1211, 1176, 1205, 1275, 1390, 1553, 1773, 2008, 2313, 2607, 2661, 2388, 2066, 1781, 1535, 1359, 1207, 1130, 1098, 1117, 1192, 1313, 1474, 1688, 1934, 2240, 2537, 2672, 2353, 2024, 1733, 1494, 1296, 1162, 1075, 1045, 1064, 1146, 1261, 1422, 1640, 1889, 2197, 2528, 2599, 2332, 1991, 1718, 1484, 1276, 1139, 1051, 1024, 1051, 1117, 1245, 1409, 1620, 1861, 2179, 2481, 2651, 2338, 2004, 1719, 1479, 1289, 1146, 1066, 1034, 1055, 1127, 1248, 1413, 1633, 1872, 2184, 2471, 2640, 2372, 2045, 1751, 1514, 1324, 1189, 1107, 1064, 1097, 1163, 1280, 1455, 1661, 1915, 2226, 2498, 2672, 2457, 2107, 1820, 1587, 1390, 1248, 1170, 1132, 1155, 1235, 1353, 1510, 1729, 1967, 2268, 2544, 2781, 2532, 2198, 1920, 1678, 1486, 1349, 1251, 1225, 1251, 1326, 1438, 1602, 1800, 2043, 2343, 2616, 2826, 2637, 2330, 2024, 1796, 1609, 1480, 1391, 1365, 1370, 1442, 1556, 1714, 1915, 2190, 2461, 2673, 2820, 2738, 2472, 2182, 1949, 1760, 1640, 1545, 1517, 1524, 1591, 1716, 1867, 2073, 2308, 2561, 2686, 2782, 2806, 2648, 2352, 2132, 1926, 1819, 1716, 1678, 1702, 1757, 1872, 2029, 2234, 2434, 2611, 2617, 2538, 2919, 2777, 2554, 2345, 2148, 2012, 1940, 1896, 1930, 1961, 2065, 2243, 2426, 2592, 2669, 2461, ]
+ gr: [2065, 2350, 2320, 2148, 2002, 1877, 1794, 1730, 1709, 1712, 1754, 1837, 1948, 2082, 2217, 2291, 2054, 2263, 2359, 2204, 2022, 1860, 1735, 1639, 1583, 1560, 1576, 1619, 1694, 1805, 1967, 2126, 2281, 2228, 2353, 2294, 2112, 1897, 1724, 1615, 1525, 1460, 1441, 1448, 1499, 1581, 1684, 1829, 2000, 2187, 2305, 2354, 2194, 1994, 1785, 1626, 1493, 1406, 1349, 1323, 1342, 1384, 1468, 1576, 1722, 1909, 2100, 2265, 2281, 2126, 1894, 1708, 1539, 1409, 1310, 1253, 1225, 1240, 1291, 1377, 1486, 1639, 1821, 2019, 2220, 2257, 2059, 1819, 1622, 1464, 1337, 1233, 1168, 1144, 1161, 1219, 1302, 1420, 1576, 1733, 1934, 2180, 2189, 1991, 1759, 1578, 1407, 1280, 1164, 1107, 1085, 1100, 1157, 1242, 1359, 1514, 1685, 1894, 2110, 2153, 1954, 1726, 1537, 1365, 1229, 1129, 1066, 1039, 1057, 1114, 1202, 1327, 1471, 1638, 1850, 2094, 2153, 1948, 1718, 1522, 1352, 1217, 1114, 1047, 1024, 1038, 1100, 1187, 1310, 1467, 1627, 1851, 2078, 2162, 1947, 1716, 1527, 1367, 1225, 1125, 1054, 1031, 1045, 1106, 1198, 1320, 1465, 1638, 1861, 2094, 2180, 1964, 1731, 1545, 1383, 1252, 1145, 1085, 1057, 1070, 1131, 1223, 1341, 1488, 1658, 1852, 2077, 2199, 2002, 1787, 1584, 1429, 1297, 1194, 1131, 1109, 1124, 1181, 1266, 1384, 1523, 1695, 1908, 2118, 2260, 2071, 1843, 1651, 1502, 1364, 1265, 1203, 1181, 1197, 1244, 1331, 1451, 1579, 1763, 1969, 2153, 2276, 2150, 1922, 1736, 1573, 1453, 1355, 1296, 1275, 1285, 1335, 1417, 1526, 1663, 1849, 2052, 2203, 2294, 2205, 2029, 1834, 1666, 1548, 1461, 1399, 1372, 1390, 1431, 1513, 1620, 1760, 1931, 2115, 2237, 2228, 2271, 2126, 1934, 1784, 1650, 1577, 1512, 1485, 1506, 1547, 1625, 1729, 1872, 2029, 2189, 2160, 2033, 2326, 2227, 2106, 1935, 1815, 1721, 1671, 1627, 1654, 1688, 1768, 1885, 2021, 2160, 2245, 2022, ]
+ gb: [2062, 2335, 2286, 2148, 1975, 1850, 1776, 1709, 1688, 1709, 1761, 1822, 1943, 2082, 2226, 2300, 2062, 2272, 2345, 2186, 2016, 1856, 1728, 1637, 1579, 1556, 1564, 1610, 1691, 1807, 1961, 2126, 2280, 2237, 2338, 2293, 2081, 1893, 1731, 1594, 1501, 1444, 1424, 1441, 1485, 1572, 1677, 1830, 2022, 2195, 2303, 2352, 2212, 1988, 1782, 1625, 1499, 1400, 1342, 1318, 1335, 1379, 1468, 1579, 1728, 1898, 2116, 2274, 2311, 2127, 1896, 1701, 1538, 1404, 1308, 1249, 1218, 1243, 1290, 1382, 1491, 1641, 1828, 2041, 2249, 2256, 2060, 1820, 1637, 1476, 1335, 1234, 1166, 1147, 1159, 1220, 1302, 1428, 1586, 1754, 1968, 2198, 2225, 2013, 1781, 1584, 1421, 1281, 1166, 1101, 1082, 1105, 1158, 1246, 1372, 1524, 1696, 1914, 2144, 2179, 1961, 1742, 1546, 1378, 1232, 1136, 1064, 1042, 1061, 1118, 1208, 1335, 1489, 1661, 1875, 2110, 2179, 1962, 1734, 1538, 1367, 1224, 1117, 1051, 1024, 1046, 1106, 1195, 1322, 1479, 1658, 1876, 2094, 2179, 1988, 1742, 1543, 1375, 1232, 1128, 1060, 1030, 1050, 1110, 1208, 1330, 1486, 1652, 1881, 2127, 2197, 2006, 1761, 1562, 1396, 1255, 1152, 1086, 1063, 1077, 1137, 1232, 1354, 1504, 1682, 1902, 2135, 2236, 2031, 1810, 1605, 1449, 1311, 1200, 1137, 1110, 1130, 1185, 1275, 1389, 1539, 1720, 1922, 2161, 2290, 2103, 1873, 1675, 1504, 1379, 1276, 1211, 1184, 1202, 1251, 1339, 1460, 1593, 1785, 1983, 2180, 2329, 2176, 1961, 1752, 1598, 1471, 1366, 1308, 1279, 1292, 1348, 1432, 1535, 1682, 1874, 2068, 2222, 2338, 2253, 2059, 1852, 1686, 1565, 1473, 1410, 1385, 1393, 1445, 1522, 1639, 1782, 1959, 2132, 2257, 2272, 2312, 2160, 1961, 1802, 1674, 1587, 1525, 1497, 1508, 1557, 1644, 1741, 1897, 2045, 2197, 2202, 2095, 2335, 2276, 2098, 1969, 1828, 1732, 1669, 1641, 1656, 1699, 1785, 1886, 2036, 2188, 2254, 2030, ]
+ b: [1957, 2184, 2113, 2000, 1876, 1757, 1686, 1620, 1614, 1596, 1649, 1687, 1805, 1914, 2027, 2082, 1880, 2101, 2170, 2056, 1894, 1763, 1659, 1571, 1527, 1501, 1506, 1541, 1608, 1694, 1809, 1964, 2094, 2040, 2156, 2121, 1964, 1796, 1654, 1563, 1485, 1419, 1399, 1407, 1447, 1499, 1587, 1724, 1859, 2019, 2076, 2184, 2063, 1888, 1705, 1586, 1470, 1383, 1330, 1299, 1315, 1352, 1421, 1513, 1633, 1794, 1956, 2125, 2153, 2012, 1821, 1660, 1511, 1395, 1302, 1241, 1219, 1232, 1275, 1352, 1453, 1570, 1726, 1914, 2080, 2106, 1953, 1751, 1601, 1462, 1333, 1235, 1171, 1142, 1156, 1207, 1285, 1403, 1520, 1656, 1838, 2038, 2081, 1885, 1704, 1553, 1398, 1266, 1166, 1101, 1079, 1097, 1151, 1240, 1340, 1471, 1616, 1780, 1970, 2041, 1882, 1686, 1513, 1364, 1235, 1125, 1065, 1037, 1054, 1108, 1196, 1299, 1429, 1576, 1756, 1935, 2049, 1853, 1665, 1504, 1363, 1227, 1118, 1049, 1024, 1035, 1099, 1188, 1298, 1434, 1582, 1752, 1929, 2073, 1870, 1677, 1520, 1364, 1240, 1131, 1057, 1037, 1048, 1102, 1188, 1308, 1442, 1600, 1756, 1921, 2048, 1885, 1695, 1525, 1387, 1248, 1148, 1085, 1064, 1076, 1131, 1215, 1325, 1458, 1591, 1780, 1926, 2089, 1926, 1731, 1563, 1432, 1304, 1191, 1132, 1112, 1129, 1172, 1258, 1359, 1492, 1647, 1814, 1975, 2115, 1983, 1799, 1626, 1491, 1368, 1270, 1212, 1188, 1204, 1249, 1322, 1416, 1548, 1697, 1874, 2045, 2164, 2047, 1888, 1705, 1571, 1451, 1357, 1296, 1276, 1291, 1336, 1404, 1499, 1616, 1772, 1956, 2069, 2177, 2139, 1964, 1785, 1654, 1549, 1459, 1402, 1376, 1385, 1423, 1493, 1587, 1704, 1847, 2003, 2057, 2144, 2190, 2056, 1906, 1753, 1642, 1556, 1506, 1488, 1485, 1534, 1592, 1684, 1809, 1935, 2076, 2081, 1997, 2228, 2150, 2030, 1888, 1799, 1704, 1637, 1631, 1629, 1667, 1716, 1816, 1914, 2043, 2122, 1917, ]
+ #2592x1944_D50_70 - D50
+ - ct: 5003
+ resolution: 2592x1944
+ r: [2445, 2929, 2967, 2734, 2576, 2380, 2211, 2113, 2074, 2072, 2166, 2255, 2383, 2626, 2861, 2812, 2411, 2795, 3067, 2915, 2660, 2369, 2162, 2038, 1940, 1900, 1919, 1978, 2106, 2281, 2519, 2702, 2875, 2718, 2953, 3006, 2761, 2452, 2197, 1964, 1815, 1720, 1676, 1712, 1769, 1899, 2070, 2268, 2581, 2739, 2798, 3022, 2895, 2570, 2275, 2011, 1793, 1619, 1512, 1486, 1506, 1577, 1740, 1898, 2123, 2420, 2659, 2869, 2939, 2776, 2457, 2132, 1863, 1619, 1479, 1366, 1332, 1356, 1435, 1571, 1769, 1978, 2272, 2543, 2736, 2905, 2703, 2360, 2023, 1747, 1516, 1355, 1247, 1214, 1243, 1332, 1457, 1651, 1898, 2194, 2488, 2714, 2945, 2615, 2257, 1937, 1653, 1419, 1242, 1151, 1117, 1138, 1219, 1374, 1575, 1795, 2080, 2417, 2695, 2795, 2558, 2207, 1875, 1586, 1350, 1182, 1089, 1046, 1084, 1158, 1305, 1497, 1736, 2027, 2351, 2624, 2840, 2547, 2201, 1863, 1566, 1323, 1172, 1068, 1024, 1057, 1142, 1288, 1484, 1725, 2010, 2343, 2584, 2857, 2580, 2222, 1875, 1573, 1355, 1182, 1086, 1046, 1072, 1151, 1301, 1509, 1762, 2052, 2371, 2707, 2912, 2615, 2257, 1904, 1631, 1389, 1227, 1129, 1090, 1122, 1197, 1331, 1529, 1777, 2040, 2397, 2639, 2905, 2628, 2290, 1987, 1698, 1457, 1296, 1202, 1154, 1181, 1259, 1398, 1607, 1826, 2119, 2466, 2684, 2939, 2748, 2399, 2078, 1796, 1584, 1424, 1310, 1276, 1297, 1377, 1519, 1708, 1943, 2222, 2543, 2736, 2982, 2863, 2570, 2243, 1964, 1740, 1570, 1470, 1435, 1448, 1537, 1683, 1856, 2094, 2342, 2632, 2798, 3037, 2970, 2681, 2413, 2111, 1920, 1769, 1672, 1616, 1634, 1709, 1847, 2019, 2234, 2488, 2709, 2835, 2836, 3026, 2851, 2611, 2315, 2106, 1932, 1836, 1801, 1807, 1899, 2027, 2199, 2392, 2620, 2805, 2644, 2515, 3013, 2967, 2792, 2553, 2343, 2181, 2046, 2035, 2033, 2108, 2239, 2444, 2575, 2731, 2812, 2411, ]
+ gr: [1764, 2120, 2133, 2015, 1886, 1783, 1704, 1644, 1626, 1631, 1666, 1739, 1792, 1938, 2020, 2014, 1727, 1988, 2163, 2079, 1945, 1797, 1681, 1595, 1551, 1526, 1533, 1567, 1619, 1707, 1833, 1963, 2052, 1936, 2115, 2119, 1964, 1824, 1676, 1555, 1486, 1428, 1406, 1425, 1447, 1526, 1623, 1720, 1866, 2001, 2030, 2142, 2062, 1902, 1716, 1580, 1465, 1376, 1321, 1301, 1314, 1355, 1428, 1513, 1645, 1791, 1941, 2022, 2104, 1988, 1816, 1663, 1515, 1388, 1294, 1235, 1215, 1225, 1271, 1350, 1449, 1571, 1719, 1880, 2028, 2113, 1963, 1766, 1588, 1445, 1325, 1231, 1168, 1142, 1155, 1213, 1284, 1392, 1517, 1662, 1835, 1980, 2065, 1897, 1712, 1544, 1394, 1268, 1163, 1105, 1080, 1097, 1147, 1225, 1348, 1464, 1603, 1780, 1948, 2044, 1877, 1672, 1512, 1355, 1223, 1127, 1057, 1038, 1052, 1107, 1193, 1312, 1437, 1593, 1741, 1931, 2004, 1873, 1674, 1501, 1350, 1211, 1113, 1048, 1024, 1038, 1095, 1180, 1301, 1424, 1571, 1738, 1895, 2027, 1871, 1681, 1506, 1361, 1227, 1123, 1064, 1035, 1057, 1104, 1189, 1310, 1440, 1573, 1758, 1916, 2048, 1884, 1707, 1526, 1374, 1248, 1154, 1087, 1069, 1073, 1128, 1205, 1317, 1455, 1590, 1757, 1925, 2031, 1907, 1720, 1557, 1406, 1289, 1193, 1129, 1104, 1116, 1170, 1244, 1348, 1478, 1621, 1792, 1947, 2075, 1973, 1777, 1615, 1465, 1355, 1269, 1195, 1176, 1184, 1234, 1302, 1412, 1532, 1669, 1826, 1975, 2100, 2028, 1870, 1687, 1542, 1443, 1352, 1294, 1264, 1278, 1324, 1393, 1492, 1602, 1757, 1911, 2031, 2093, 2054, 1935, 1763, 1631, 1529, 1441, 1393, 1361, 1371, 1419, 1480, 1569, 1690, 1827, 1960, 2020, 1957, 2091, 1979, 1864, 1722, 1619, 1529, 1484, 1458, 1471, 1497, 1557, 1654, 1761, 1918, 2005, 1907, 1783, 2076, 2094, 1938, 1829, 1729, 1657, 1592, 1571, 1572, 1616, 1664, 1769, 1880, 1968, 1994, 1718, ]
+ gb: [1771, 2117, 2122, 1999, 1887, 1768, 1691, 1633, 1619, 1633, 1668, 1736, 1836, 1923, 2010, 2002, 1734, 2040, 2161, 2070, 1925, 1777, 1678, 1601, 1532, 1528, 1518, 1562, 1625, 1724, 1840, 1956, 2079, 1954, 2091, 2109, 1965, 1826, 1669, 1561, 1472, 1419, 1400, 1422, 1450, 1521, 1608, 1732, 1867, 2001, 2028, 2151, 2053, 1877, 1718, 1579, 1465, 1379, 1319, 1296, 1309, 1350, 1428, 1530, 1647, 1792, 1934, 2030, 2112, 2003, 1824, 1656, 1511, 1388, 1296, 1240, 1206, 1228, 1271, 1347, 1458, 1577, 1725, 1894, 2018, 2112, 1978, 1778, 1602, 1451, 1325, 1231, 1165, 1141, 1154, 1207, 1292, 1397, 1530, 1687, 1849, 2030, 2056, 1911, 1723, 1554, 1396, 1271, 1165, 1103, 1077, 1100, 1148, 1236, 1343, 1477, 1626, 1798, 1972, 2027, 1885, 1692, 1522, 1358, 1225, 1126, 1068, 1038, 1055, 1105, 1194, 1313, 1443, 1583, 1771, 1931, 2037, 1868, 1690, 1514, 1355, 1216, 1116, 1053, 1024, 1046, 1096, 1191, 1306, 1433, 1586, 1762, 1925, 2061, 1891, 1688, 1522, 1363, 1236, 1128, 1067, 1037, 1059, 1110, 1196, 1318, 1439, 1596, 1765, 1977, 2056, 1898, 1709, 1535, 1391, 1264, 1157, 1089, 1069, 1076, 1131, 1216, 1335, 1467, 1596, 1775, 1948, 2048, 1929, 1737, 1567, 1427, 1294, 1198, 1130, 1106, 1120, 1168, 1260, 1353, 1491, 1641, 1811, 1963, 2112, 1988, 1795, 1626, 1484, 1374, 1274, 1198, 1174, 1190, 1237, 1317, 1427, 1538, 1695, 1840, 2000, 2140, 2045, 1877, 1708, 1567, 1443, 1360, 1304, 1267, 1288, 1337, 1398, 1491, 1621, 1781, 1919, 2039, 2112, 2109, 1936, 1792, 1633, 1539, 1450, 1396, 1377, 1376, 1422, 1496, 1579, 1697, 1835, 1976, 2028, 2029, 2089, 2028, 1884, 1734, 1638, 1543, 1490, 1460, 1466, 1514, 1579, 1670, 1774, 1910, 2013, 1904, 1790, 2117, 2065, 1961, 1854, 1752, 1672, 1616, 1590, 1599, 1623, 1700, 1782, 1867, 1984, 2022, 1698, ]
+ b: [1676, 1930, 1956, 1924, 1811, 1685, 1640, 1571, 1556, 1544, 1569, 1639, 1710, 1802, 1890, 1881, 1642, 1930, 2013, 1952, 1827, 1711, 1616, 1538, 1488, 1472, 1470, 1494, 1560, 1632, 1724, 1825, 1906, 1803, 1985, 2007, 1894, 1759, 1625, 1524, 1440, 1401, 1380, 1385, 1411, 1463, 1537, 1649, 1765, 1876, 1884, 1996, 1961, 1831, 1676, 1555, 1444, 1367, 1301, 1282, 1295, 1328, 1383, 1468, 1580, 1708, 1833, 1900, 2020, 1914, 1777, 1618, 1508, 1382, 1284, 1227, 1197, 1216, 1251, 1325, 1408, 1511, 1639, 1796, 1915, 1998, 1901, 1716, 1581, 1447, 1327, 1226, 1169, 1134, 1155, 1199, 1269, 1368, 1486, 1608, 1741, 1879, 1959, 1838, 1674, 1531, 1387, 1269, 1158, 1094, 1072, 1082, 1132, 1217, 1323, 1431, 1568, 1706, 1847, 1956, 1806, 1645, 1497, 1352, 1222, 1124, 1059, 1031, 1049, 1093, 1177, 1292, 1398, 1528, 1686, 1800, 1945, 1806, 1634, 1494, 1357, 1211, 1110, 1049, 1024, 1034, 1080, 1174, 1277, 1388, 1519, 1673, 1809, 1989, 1822, 1664, 1497, 1366, 1239, 1115, 1065, 1033, 1049, 1095, 1183, 1295, 1406, 1544, 1679, 1855, 1981, 1838, 1674, 1512, 1384, 1260, 1151, 1086, 1062, 1069, 1121, 1198, 1303, 1423, 1540, 1691, 1847, 1964, 1856, 1683, 1550, 1422, 1294, 1189, 1122, 1103, 1113, 1164, 1237, 1332, 1446, 1574, 1741, 1859, 2008, 1885, 1755, 1606, 1471, 1371, 1263, 1197, 1169, 1182, 1228, 1298, 1392, 1501, 1620, 1763, 1883, 2034, 1950, 1823, 1676, 1540, 1439, 1353, 1298, 1269, 1276, 1325, 1383, 1468, 1575, 1700, 1833, 1923, 2012, 1995, 1894, 1744, 1625, 1519, 1440, 1389, 1361, 1370, 1403, 1467, 1558, 1642, 1773, 1876, 1908, 1903, 2038, 1942, 1844, 1704, 1599, 1528, 1484, 1445, 1457, 1494, 1544, 1602, 1724, 1843, 1906, 1827, 1724, 2051, 2027, 1914, 1827, 1698, 1640, 1577, 1566, 1588, 1604, 1633, 1717, 1811, 1901, 1930, 1665, ]
+
+...
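
Each LensShadingCorrection set in these new files carries one 289-entry gain table per Bayer channel (r, gr, gb, b), keyed by a colour temperature and, here, a capture resolution; the eight 0.0625 x-size/y-size fractions describe half of the sector grid, which implies 17 vertices per axis (17 x 17 = 289). Below is a minimal Python sketch (PyYAML assumed; lsc_tables() is a hypothetical helper, and the nearest-ct selection is a simplification of whatever interpolation the IPA actually performs) that picks the closest set and views each channel as a 17x17 grid:

    # Sketch: choose the LensShadingCorrection set nearest a colour
    # temperature and reshape its tables to 17x17. Not libcamera code.
    import yaml

    GRID = 17  # 8 mirrored sector sizes -> 16 sectors -> 17 vertices per axis

    def lsc_tables(path, ct, resolution=None):
        with open(path) as f:
            doc = yaml.safe_load(f)
        lsc = next(e['LensShadingCorrection'] for e in doc['algorithms']
                   if 'LensShadingCorrection' in e)
        sets = lsc['sets']
        if resolution is not None:
            # Keep sets for the requested resolution; fall back to all of them.
            sets = [s for s in sets if s.get('resolution') == resolution] or sets
        best = min(sets, key=lambda s: abs(s['ct'] - ct))
        tables = {}
        for ch in ('r', 'gr', 'gb', 'b'):
            flat = best[ch]
            assert len(flat) == GRID * GRID
            tables[ch] = [flat[i * GRID:(i + 1) * GRID] for i in range(GRID)]
        return best['ct'], tables

    ct_used, tables = lsc_tables('ov5695.yaml', ct=5000, resolution='2592x1944')
    print(ct_used, tables['r'][8][8])  # centre gain of the red table

With the ov5695 data above, a request for 5000 K snaps to the 5003 K (D50) set; the smallest entries in these tables are 1024, which appears to be the unity gain, with the gains rising towards the grid edges to compensate vignetting.
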
diff --git a/src/ipa/rkisp1/data/ov8858.yaml b/src/ipa/rkisp1/data/ov8858.yaml
new file mode 100644
index 00000000..f297b0e0
--- /dev/null
+++ b/src/ipa/rkisp1/data/ov8858.yaml
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: CC0-1.0
+%YAML 1.1
+---
+version: 1
+algorithms:
+ - Agc:
+ - Awb:
+ - LensShadingCorrection:
+ x-size: [ 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625 ]
+ y-size: [ 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625 ]
+ sets:
+ #3264x2448_A_70 - A
+ - ct: 2856
+ resolution: 3264x2448
+ r: [4095, 3932, 3584, 3324, 3113, 2934, 2747, 2619, 2566, 2579, 2671, 2816, 3009, 3217, 3444, 3843, 4095, 4095, 3658, 3343, 3088, 2867, 2620, 2404, 2271, 2207, 2229, 2315, 2485, 2727, 2965, 3232, 3500, 4057, 3926, 3482, 3187, 2914, 2612, 2330, 2112, 1976, 1917, 1931, 2028, 2198, 2456, 2762, 3042, 3335, 3770, 3739, 3331, 3029, 2720, 2364, 2070, 1852, 1718, 1655, 1669, 1765, 1940, 2207, 2538, 2878, 3183, 3565, 3590, 3209, 2910, 2524, 2156, 1860, 1642, 1493, 1431, 1446, 1551, 1734, 1986, 2338, 2721, 3075, 3405, 3484, 3116, 2778, 2373, 1997, 1698, 1466, 1315, 1254, 1272, 1374, 1562, 1825, 2169, 2587, 2946, 3317, 3415, 3044, 2682, 2252, 1873, 1574, 1336, 1192, 1126, 1146, 1249, 1437, 1712, 2050, 2462, 2877, 3238, 3355, 3002, 2619, 2171, 1800, 1490, 1259, 1112, 1051, 1073, 1173, 1359, 1635, 1977, 2388, 2813, 3182, 3348, 2969, 2587, 2138, 1768, 1457, 1228, 1085, 1024, 1043, 1144, 1326, 1603, 1950, 2364, 2783, 3170, 3344, 2984, 2594, 2152, 1776, 1468, 1239, 1098, 1041, 1061, 1161, 1342, 1617, 1962, 2373, 2798, 3177, 3388, 3011, 2637, 2207, 1829, 1528, 1298, 1158, 1100, 1120, 1217, 1408, 1677, 2018, 2429, 2841, 3192, 3442, 3064, 2718, 2301, 1929, 1633, 1405, 1263, 1205, 1224, 1326, 1513, 1777, 2119, 2525, 2903, 3274, 3557, 3138, 2822, 2435, 2066, 1775, 1558, 1414, 1355, 1378, 1478, 1663, 1927, 2255, 2657, 2987, 3369, 3682, 3256, 2940, 2604, 2252, 1958, 1748, 1609, 1557, 1576, 1677, 1857, 2106, 2445, 2793, 3096, 3526, 3874, 3380, 3075, 2783, 2472, 2189, 1974, 1846, 1790, 1811, 1909, 2086, 2342, 2643, 2934, 3247, 3743, 4095, 3583, 3218, 2950, 2708, 2456, 2257, 2114, 2064, 2083, 2185, 2364, 2598, 2856, 3111, 3444, 4045, 4095, 3842, 3474, 3155, 2950, 2731, 2575, 2440, 2388, 2413, 2499, 2659, 2846, 3056, 3334, 3796, 4095, ]
+ gr: [3246, 2753, 2547, 2359, 2249, 2148, 2052, 1977, 1938, 1947, 1995, 2082, 2183, 2277, 2411, 2655, 2957, 2906, 2568, 2361, 2223, 2092, 1964, 1850, 1767, 1735, 1740, 1790, 1881, 2002, 2124, 2265, 2437, 2751, 2740, 2449, 2261, 2106, 1950, 1798, 1681, 1604, 1570, 1577, 1626, 1714, 1846, 2012, 2149, 2322, 2581, 2628, 2348, 2169, 2000, 1808, 1654, 1539, 1460, 1419, 1429, 1483, 1576, 1710, 1881, 2062, 2231, 2443, 2541, 2279, 2102, 1891, 1687, 1536, 1420, 1330, 1289, 1298, 1362, 1459, 1589, 1773, 1967, 2168, 2352, 2459, 2226, 2027, 1797, 1599, 1442, 1313, 1221, 1179, 1190, 1253, 1359, 1497, 1675, 1898, 2100, 2286, 2406, 2180, 1976, 1732, 1531, 1369, 1231, 1140, 1096, 1109, 1174, 1284, 1431, 1608, 1824, 2055, 2245, 2374, 2148, 1928, 1684, 1484, 1317, 1178, 1084, 1043, 1058, 1122, 1234, 1387, 1562, 1785, 2020, 2218, 2363, 2140, 1910, 1663, 1464, 1292, 1156, 1063, 1024, 1036, 1102, 1214, 1363, 1547, 1762, 2004, 2194, 2366, 2136, 1917, 1670, 1469, 1302, 1163, 1073, 1032, 1047, 1111, 1223, 1373, 1552, 1775, 2009, 2206, 2383, 2158, 1940, 1703, 1506, 1339, 1201, 1112, 1072, 1087, 1150, 1265, 1408, 1584, 1805, 2030, 2228, 2434, 2189, 1994, 1757, 1557, 1400, 1270, 1181, 1142, 1154, 1218, 1328, 1468, 1640, 1860, 2068, 2267, 2497, 2235, 2043, 1837, 1630, 1477, 1360, 1273, 1238, 1249, 1310, 1412, 1544, 1725, 1924, 2124, 2329, 2592, 2305, 2109, 1925, 1731, 1576, 1460, 1384, 1350, 1364, 1422, 1513, 1648, 1818, 2009, 2174, 2427, 2699, 2379, 2188, 2022, 1860, 1696, 1588, 1510, 1480, 1489, 1543, 1637, 1771, 1937, 2072, 2269, 2546, 2862, 2514, 2276, 2120, 1983, 1850, 1737, 1664, 1628, 1642, 1695, 1787, 1914, 2043, 2182, 2390, 2734, 3175, 2661, 2434, 2232, 2119, 2004, 1921, 1849, 1813, 1816, 1874, 1959, 2049, 2159, 2317, 2604, 2891, ]
+ gb: [3248, 2762, 2549, 2352, 2241, 2135, 2024, 1949, 1910, 1923, 1970, 2058, 2167, 2278, 2427, 2679, 3003, 2939, 2581, 2369, 2212, 2084, 1945, 1829, 1743, 1710, 1713, 1773, 1861, 1999, 2127, 2278, 2456, 2799, 2766, 2468, 2268, 2114, 1949, 1788, 1666, 1587, 1550, 1557, 1612, 1711, 1849, 2022, 2168, 2354, 2627, 2659, 2372, 2185, 2003, 1808, 1646, 1531, 1447, 1404, 1415, 1474, 1573, 1711, 1896, 2082, 2269, 2494, 2572, 2297, 2122, 1903, 1694, 1534, 1411, 1322, 1278, 1294, 1356, 1459, 1599, 1796, 2003, 2204, 2415, 2494, 2259, 2053, 1813, 1609, 1442, 1310, 1216, 1174, 1186, 1254, 1368, 1512, 1699, 1934, 2147, 2352, 2450, 2219, 2006, 1751, 1543, 1372, 1233, 1134, 1096, 1108, 1175, 1292, 1449, 1639, 1865, 2103, 2311, 2424, 2182, 1960, 1705, 1498, 1324, 1181, 1086, 1041, 1059, 1127, 1245, 1404, 1594, 1828, 2078, 2281, 2405, 2182, 1937, 1687, 1480, 1301, 1161, 1062, 1024, 1038, 1107, 1224, 1384, 1581, 1812, 2057, 2272, 2417, 2181, 1951, 1695, 1487, 1312, 1167, 1074, 1032, 1050, 1118, 1235, 1397, 1586, 1820, 2069, 2278, 2450, 2196, 1974, 1724, 1522, 1348, 1205, 1113, 1075, 1089, 1153, 1276, 1430, 1619, 1849, 2095, 2291, 2483, 2229, 2022, 1779, 1573, 1408, 1272, 1181, 1142, 1156, 1223, 1339, 1488, 1673, 1905, 2123, 2343, 2541, 2277, 2079, 1856, 1643, 1485, 1361, 1270, 1235, 1248, 1313, 1421, 1566, 1751, 1971, 2173, 2399, 2635, 2339, 2138, 1944, 1745, 1580, 1458, 1380, 1344, 1359, 1418, 1519, 1661, 1849, 2048, 2222, 2487, 2743, 2413, 2216, 2037, 1864, 1702, 1579, 1500, 1467, 1479, 1537, 1642, 1777, 1958, 2108, 2315, 2617, 2890, 2544, 2293, 2131, 1988, 1842, 1726, 1651, 1612, 1628, 1684, 1783, 1920, 2060, 2213, 2432, 2804, 3189, 2693, 2445, 2245, 2116, 2000, 1902, 1826, 1789, 1798, 1857, 1950, 2045, 2170, 2337, 2642, 2952, ]
+ b: [3058, 2592, 2385, 2213, 2113, 2016, 1936, 1869, 1845, 1844, 1887, 1965, 2056, 2162, 2288, 2535, 2815, 2739, 2411, 2208, 2067, 1959, 1848, 1747, 1681, 1655, 1659, 1709, 1788, 1909, 2024, 2149, 2317, 2640, 2595, 2298, 2119, 1981, 1836, 1704, 1608, 1543, 1517, 1519, 1561, 1646, 1774, 1925, 2042, 2217, 2463, 2469, 2218, 2033, 1880, 1710, 1575, 1479, 1419, 1384, 1398, 1439, 1527, 1647, 1810, 1968, 2125, 2330, 2404, 2138, 1979, 1785, 1611, 1474, 1374, 1303, 1271, 1280, 1336, 1421, 1545, 1706, 1895, 2058, 2261, 2341, 2104, 1920, 1713, 1535, 1397, 1284, 1203, 1168, 1181, 1237, 1339, 1462, 1631, 1822, 2012, 2194, 2293, 2063, 1882, 1662, 1480, 1336, 1206, 1128, 1092, 1106, 1165, 1270, 1407, 1565, 1767, 1965, 2158, 2262, 2048, 1845, 1625, 1450, 1289, 1165, 1079, 1041, 1057, 1122, 1223, 1370, 1534, 1725, 1940, 2129, 2258, 2046, 1834, 1605, 1433, 1273, 1147, 1058, 1024, 1037, 1102, 1209, 1352, 1519, 1711, 1928, 2110, 2261, 2041, 1847, 1615, 1442, 1282, 1151, 1069, 1028, 1048, 1109, 1218, 1359, 1523, 1716, 1927, 2124, 2282, 2064, 1864, 1645, 1461, 1316, 1184, 1103, 1070, 1083, 1143, 1249, 1389, 1552, 1745, 1948, 2141, 2326, 2090, 1907, 1695, 1505, 1362, 1247, 1164, 1133, 1144, 1202, 1307, 1436, 1597, 1794, 1985, 2182, 2380, 2132, 1952, 1758, 1569, 1429, 1323, 1247, 1215, 1229, 1283, 1379, 1506, 1669, 1851, 2025, 2222, 2458, 2187, 2000, 1835, 1653, 1511, 1407, 1344, 1314, 1326, 1374, 1461, 1583, 1749, 1916, 2069, 2319, 2559, 2255, 2066, 1910, 1757, 1616, 1512, 1450, 1427, 1431, 1481, 1565, 1688, 1850, 1970, 2151, 2432, 2700, 2384, 2151, 1995, 1874, 1747, 1637, 1577, 1552, 1563, 1610, 1689, 1817, 1934, 2064, 2254, 2607, 3019, 2498, 2301, 2107, 1991, 1888, 1808, 1742, 1716, 1716, 1775, 1847, 1930, 2044, 2200, 2494, 2763, ]
+ #3264x2448_D50_70 - D50
+ - ct: 5003
+ resolution: 3264x2448
+ r: [4095, 3613, 3287, 3049, 2867, 2696, 2545, 2427, 2374, 2387, 2473, 2592, 2779, 2948, 3156, 3544, 3984, 3842, 3341, 3076, 2850, 2650, 2438, 2245, 2123, 2065, 2085, 2164, 2316, 2531, 2745, 2979, 3232, 3738, 3605, 3194, 2924, 2694, 2430, 2182, 1986, 1867, 1814, 1824, 1909, 2060, 2301, 2567, 2807, 3088, 3473, 3432, 3048, 2806, 2516, 2208, 1953, 1758, 1638, 1581, 1596, 1679, 1836, 2061, 2367, 2669, 2928, 3285, 3275, 2940, 2676, 2354, 2027, 1763, 1572, 1443, 1385, 1398, 1496, 1648, 1878, 2184, 2527, 2813, 3150, 3181, 2855, 2566, 2201, 1877, 1622, 1413, 1284, 1226, 1243, 1333, 1502, 1732, 2033, 2391, 2731, 3021, 3116, 2786, 2474, 2100, 1773, 1510, 1304, 1171, 1114, 1131, 1224, 1389, 1630, 1925, 2296, 2638, 2973, 3060, 2752, 2410, 2024, 1710, 1437, 1231, 1101, 1044, 1063, 1152, 1318, 1559, 1865, 2228, 2600, 2919, 3044, 2730, 2388, 2001, 1677, 1403, 1204, 1073, 1024, 1036, 1128, 1289, 1534, 1839, 2198, 2569, 2903, 3039, 2734, 2392, 2004, 1684, 1417, 1210, 1086, 1031, 1050, 1138, 1306, 1544, 1845, 2204, 2576, 2916, 3099, 2751, 2432, 2050, 1732, 1469, 1264, 1136, 1085, 1101, 1194, 1358, 1596, 1891, 2264, 2612, 2929, 3131, 2808, 2499, 2142, 1811, 1556, 1354, 1230, 1178, 1195, 1286, 1451, 1683, 1986, 2341, 2678, 2991, 3235, 2875, 2592, 2258, 1936, 1679, 1491, 1363, 1310, 1332, 1421, 1582, 1813, 2113, 2455, 2737, 3096, 3357, 2965, 2692, 2412, 2094, 1840, 1650, 1533, 1485, 1501, 1591, 1747, 1979, 2275, 2582, 2840, 3239, 3543, 3094, 2808, 2555, 2298, 2043, 1851, 1737, 1685, 1703, 1791, 1955, 2178, 2459, 2700, 2992, 3425, 3749, 3286, 2950, 2712, 2495, 2282, 2093, 1972, 1919, 1950, 2033, 2186, 2412, 2625, 2856, 3165, 3713, 4095, 3514, 3156, 2880, 2701, 2511, 2370, 2249, 2203, 2222, 2309, 2454, 2607, 2813, 3060, 3476, 3973, ]
+ gr: [3126, 2654, 2449, 2277, 2167, 2065, 1967, 1898, 1859, 1866, 1917, 2000, 2085, 2198, 2323, 2565, 2866, 2805, 2487, 2288, 2151, 2020, 1894, 1781, 1706, 1672, 1681, 1731, 1812, 1937, 2057, 2191, 2358, 2670, 2662, 2378, 2191, 2044, 1889, 1739, 1629, 1554, 1520, 1528, 1576, 1662, 1791, 1947, 2083, 2253, 2496, 2545, 2278, 2108, 1939, 1753, 1606, 1498, 1421, 1385, 1393, 1444, 1533, 1656, 1830, 2001, 2166, 2370, 2460, 2205, 2037, 1834, 1644, 1494, 1384, 1301, 1264, 1275, 1328, 1422, 1547, 1723, 1914, 2100, 2284, 2377, 2164, 1972, 1748, 1557, 1410, 1287, 1200, 1162, 1174, 1231, 1334, 1463, 1632, 1846, 2043, 2218, 2335, 2117, 1922, 1686, 1494, 1339, 1213, 1125, 1090, 1100, 1157, 1263, 1401, 1569, 1778, 1995, 2176, 2311, 2081, 1879, 1641, 1452, 1292, 1163, 1078, 1038, 1055, 1111, 1217, 1356, 1527, 1740, 1960, 2152, 2296, 2074, 1861, 1621, 1434, 1273, 1142, 1058, 1024, 1032, 1093, 1197, 1338, 1508, 1718, 1949, 2134, 2292, 2079, 1863, 1628, 1441, 1280, 1149, 1065, 1029, 1042, 1100, 1207, 1347, 1519, 1728, 1951, 2144, 2319, 2089, 1890, 1658, 1470, 1312, 1185, 1101, 1065, 1077, 1138, 1242, 1378, 1549, 1757, 1976, 2157, 2353, 2128, 1936, 1706, 1519, 1366, 1249, 1162, 1129, 1142, 1198, 1303, 1434, 1600, 1808, 2011, 2202, 2417, 2165, 1985, 1785, 1586, 1443, 1327, 1249, 1217, 1226, 1283, 1378, 1506, 1675, 1874, 2060, 2255, 2508, 2231, 2044, 1867, 1681, 1530, 1425, 1348, 1320, 1331, 1386, 1476, 1601, 1770, 1955, 2110, 2345, 2616, 2306, 2124, 1958, 1799, 1648, 1536, 1466, 1437, 1448, 1497, 1589, 1716, 1880, 2017, 2199, 2467, 2754, 2434, 2202, 2053, 1920, 1788, 1681, 1608, 1574, 1588, 1641, 1726, 1853, 1980, 2112, 2304, 2656, 3054, 2562, 2347, 2155, 2038, 1931, 1843, 1778, 1742, 1748, 1803, 1887, 1976, 2089, 2229, 2513, 2806, ]
+ gb: [3110, 2650, 2442, 2268, 2159, 2061, 1963, 1887, 1855, 1860, 1910, 1995, 2091, 2202, 2330, 2589, 2876, 2817, 2480, 2285, 2141, 2019, 1890, 1777, 1697, 1664, 1670, 1725, 1811, 1936, 2060, 2200, 2370, 2701, 2645, 2378, 2188, 2041, 1882, 1735, 1623, 1548, 1513, 1524, 1567, 1660, 1798, 1959, 2096, 2272, 2534, 2550, 2276, 2104, 1935, 1753, 1601, 1494, 1417, 1377, 1388, 1441, 1533, 1660, 1839, 2014, 2181, 2402, 2452, 2209, 2036, 1834, 1641, 1493, 1377, 1298, 1257, 1272, 1328, 1426, 1554, 1732, 1932, 2122, 2315, 2387, 2165, 1969, 1749, 1559, 1407, 1285, 1197, 1159, 1171, 1233, 1337, 1472, 1649, 1862, 2070, 2256, 2336, 2119, 1926, 1684, 1495, 1340, 1210, 1124, 1087, 1100, 1159, 1269, 1411, 1582, 1801, 2019, 2219, 2312, 2092, 1885, 1644, 1453, 1295, 1164, 1077, 1036, 1054, 1115, 1221, 1370, 1544, 1763, 1995, 2189, 2297, 2086, 1862, 1629, 1435, 1275, 1145, 1058, 1024, 1036, 1097, 1205, 1352, 1529, 1746, 1980, 2180, 2305, 2091, 1869, 1634, 1444, 1283, 1151, 1066, 1030, 1045, 1106, 1215, 1360, 1538, 1754, 1987, 2182, 2329, 2104, 1896, 1662, 1476, 1315, 1187, 1101, 1066, 1081, 1142, 1249, 1395, 1566, 1785, 2007, 2205, 2369, 2133, 1942, 1715, 1523, 1370, 1247, 1163, 1128, 1141, 1203, 1309, 1447, 1618, 1834, 2043, 2240, 2430, 2181, 1995, 1785, 1588, 1444, 1330, 1247, 1216, 1227, 1287, 1387, 1520, 1694, 1902, 2086, 2299, 2513, 2244, 2058, 1879, 1688, 1534, 1424, 1350, 1317, 1331, 1388, 1478, 1613, 1786, 1975, 2139, 2392, 2625, 2320, 2129, 1965, 1806, 1649, 1539, 1465, 1435, 1446, 1500, 1596, 1728, 1895, 2039, 2230, 2517, 2757, 2450, 2210, 2061, 1924, 1795, 1680, 1608, 1572, 1587, 1638, 1732, 1863, 1994, 2136, 2337, 2692, 3076, 2574, 2347, 2163, 2039, 1933, 1842, 1764, 1738, 1749, 1804, 1883, 1981, 2095, 2253, 2542, 2845, ]
+ b: [2915, 2480, 2280, 2121, 2025, 1929, 1854, 1793, 1773, 1769, 1815, 1879, 1970, 2069, 2185, 2406, 2670, 2610, 2321, 2132, 1997, 1889, 1781, 1681, 1616, 1587, 1598, 1642, 1721, 1831, 1945, 2068, 2221, 2492, 2485, 2222, 2043, 1913, 1775, 1639, 1541, 1485, 1457, 1466, 1500, 1579, 1705, 1855, 1972, 2122, 2360, 2380, 2127, 1969, 1815, 1647, 1516, 1427, 1367, 1342, 1342, 1390, 1463, 1577, 1739, 1901, 2041, 2243, 2297, 2061, 1914, 1722, 1549, 1418, 1325, 1261, 1233, 1241, 1287, 1369, 1483, 1638, 1820, 1994, 2158, 2233, 2025, 1852, 1646, 1474, 1347, 1242, 1171, 1142, 1152, 1203, 1293, 1409, 1559, 1758, 1931, 2104, 2198, 1987, 1808, 1594, 1424, 1290, 1178, 1104, 1079, 1088, 1139, 1232, 1358, 1505, 1700, 1893, 2077, 2165, 1972, 1772, 1561, 1393, 1250, 1139, 1065, 1035, 1051, 1101, 1196, 1323, 1473, 1656, 1867, 2046, 2166, 1960, 1769, 1542, 1381, 1234, 1121, 1048, 1024, 1034, 1084, 1178, 1308, 1462, 1651, 1855, 2036, 2166, 1961, 1774, 1548, 1380, 1240, 1126, 1054, 1025, 1041, 1092, 1186, 1315, 1464, 1654, 1862, 2041, 2184, 1975, 1794, 1576, 1408, 1268, 1155, 1082, 1056, 1066, 1118, 1211, 1338, 1492, 1678, 1877, 2063, 2222, 1999, 1826, 1623, 1441, 1314, 1208, 1137, 1109, 1120, 1171, 1261, 1383, 1533, 1724, 1912, 2071, 2265, 2043, 1871, 1684, 1507, 1372, 1276, 1211, 1183, 1193, 1242, 1327, 1447, 1600, 1781, 1941, 2132, 2351, 2095, 1928, 1760, 1588, 1454, 1357, 1297, 1271, 1282, 1326, 1406, 1523, 1684, 1849, 1988, 2215, 2439, 2167, 1992, 1847, 1695, 1551, 1455, 1397, 1372, 1381, 1422, 1507, 1622, 1785, 1897, 2068, 2323, 2564, 2289, 2068, 1923, 1803, 1684, 1581, 1520, 1495, 1504, 1546, 1623, 1752, 1866, 1990, 2170, 2488, 2838, 2390, 2201, 2026, 1908, 1814, 1736, 1669, 1643, 1654, 1700, 1774, 1862, 1964, 2101, 2363, 2613, ]
+ #3264x2448_D65_70 - D65
+ - ct: 6504
+ resolution: 3264x2448
+ r: [4095, 3609, 3293, 3044, 2858, 2708, 2555, 2426, 2383, 2390, 2485, 2610, 2769, 2948, 3150, 3554, 4002, 3858, 3341, 3067, 2851, 2656, 2436, 2251, 2136, 2083, 2092, 2169, 2327, 2531, 2747, 2983, 3227, 3713, 3579, 3194, 2920, 2704, 2441, 2187, 2002, 1873, 1824, 1838, 1920, 2070, 2308, 2573, 2812, 3074, 3487, 3428, 3039, 2791, 2525, 2213, 1962, 1775, 1650, 1593, 1609, 1691, 1852, 2077, 2379, 2680, 2932, 3261, 3283, 2933, 2685, 2353, 2038, 1779, 1582, 1449, 1395, 1407, 1501, 1661, 1893, 2189, 2527, 2825, 3136, 3179, 2846, 2572, 2206, 1894, 1626, 1426, 1292, 1234, 1250, 1343, 1513, 1744, 2046, 2404, 2725, 3037, 3115, 2787, 2479, 2109, 1786, 1520, 1312, 1180, 1120, 1136, 1229, 1399, 1641, 1938, 2296, 2645, 2956, 3052, 2747, 2419, 2039, 1716, 1448, 1238, 1106, 1047, 1068, 1160, 1326, 1572, 1876, 2228, 2597, 2913, 3044, 2732, 2389, 2006, 1687, 1415, 1208, 1079, 1024, 1040, 1132, 1296, 1542, 1843, 2206, 2571, 2901, 3049, 2721, 2397, 2016, 1694, 1426, 1215, 1091, 1035, 1055, 1145, 1312, 1550, 1859, 2211, 2575, 2919, 3078, 2759, 2434, 2063, 1737, 1478, 1271, 1141, 1088, 1106, 1199, 1367, 1603, 1905, 2267, 2616, 2927, 3143, 2793, 2505, 2140, 1828, 1564, 1364, 1237, 1183, 1202, 1290, 1461, 1695, 1996, 2340, 2676, 2993, 3228, 2867, 2595, 2268, 1942, 1689, 1499, 1370, 1316, 1340, 1431, 1593, 1823, 2117, 2461, 2756, 3077, 3371, 2972, 2696, 2408, 2104, 1852, 1661, 1541, 1491, 1505, 1599, 1758, 1987, 2276, 2582, 2849, 3235, 3523, 3088, 2811, 2565, 2302, 2046, 1860, 1745, 1694, 1716, 1800, 1961, 2188, 2460, 2699, 2987, 3420, 3757, 3276, 2947, 2706, 2497, 2283, 2099, 1979, 1929, 1947, 2032, 2199, 2409, 2626, 2852, 3158, 3715, 4095, 3473, 3168, 2886, 2708, 2514, 2365, 2251, 2203, 2229, 2315, 2440, 2623, 2806, 3061, 3472, 3935, ]
+ gr: [3109, 2638, 2434, 2267, 2147, 2051, 1954, 1871, 1847, 1848, 1903, 1981, 2080, 2184, 2312, 2555, 2821, 2799, 2481, 2275, 2132, 2010, 1885, 1775, 1698, 1665, 1670, 1719, 1802, 1926, 2045, 2182, 2346, 2660, 2643, 2361, 2180, 2032, 1880, 1730, 1618, 1547, 1513, 1520, 1566, 1652, 1785, 1940, 2074, 2238, 2491, 2534, 2272, 2096, 1934, 1743, 1597, 1491, 1416, 1379, 1389, 1437, 1526, 1653, 1822, 1991, 2156, 2356, 2445, 2203, 2031, 1828, 1639, 1492, 1376, 1298, 1261, 1270, 1325, 1418, 1540, 1717, 1908, 2093, 2270, 2374, 2153, 1965, 1746, 1552, 1404, 1282, 1198, 1160, 1173, 1228, 1331, 1459, 1629, 1836, 2038, 2206, 2328, 2111, 1916, 1679, 1490, 1336, 1208, 1123, 1087, 1097, 1156, 1260, 1398, 1564, 1772, 1985, 2174, 2292, 2087, 1871, 1639, 1448, 1292, 1161, 1077, 1038, 1051, 1111, 1214, 1355, 1521, 1732, 1955, 2142, 2290, 2067, 1852, 1619, 1430, 1271, 1141, 1055, 1024, 1033, 1091, 1194, 1335, 1507, 1715, 1939, 2133, 2285, 2073, 1861, 1623, 1436, 1278, 1147, 1065, 1028, 1042, 1099, 1204, 1345, 1514, 1723, 1945, 2131, 2312, 2082, 1884, 1653, 1467, 1308, 1181, 1100, 1065, 1076, 1133, 1240, 1377, 1543, 1754, 1968, 2151, 2350, 2114, 1928, 1703, 1515, 1364, 1244, 1161, 1126, 1138, 1197, 1300, 1429, 1595, 1803, 2003, 2192, 2404, 2166, 1977, 1775, 1581, 1435, 1322, 1245, 1213, 1223, 1278, 1375, 1504, 1671, 1872, 2048, 2255, 2499, 2220, 2040, 1859, 1678, 1526, 1416, 1345, 1314, 1327, 1380, 1468, 1596, 1763, 1948, 2105, 2337, 2607, 2299, 2116, 1951, 1792, 1638, 1534, 1458, 1431, 1443, 1492, 1583, 1709, 1873, 2004, 2191, 2463, 2733, 2429, 2197, 2044, 1912, 1782, 1670, 1601, 1568, 1581, 1630, 1719, 1847, 1973, 2107, 2304, 2637, 3045, 2548, 2338, 2143, 2029, 1920, 1832, 1762, 1736, 1737, 1795, 1871, 1961, 2070, 2227, 2493, 2794, ]
+ gb: [3118, 2634, 2434, 2259, 2154, 2052, 1949, 1888, 1844, 1853, 1900, 1987, 2084, 2192, 2325, 2571, 2855, 2786, 2469, 2271, 2125, 2010, 1882, 1775, 1690, 1662, 1669, 1719, 1805, 1928, 2050, 2192, 2362, 2674, 2635, 2358, 2173, 2030, 1872, 1729, 1620, 1547, 1508, 1516, 1565, 1654, 1790, 1947, 2082, 2257, 2516, 2527, 2260, 2094, 1923, 1744, 1598, 1486, 1411, 1374, 1388, 1438, 1525, 1657, 1830, 2001, 2169, 2382, 2431, 2196, 2021, 1824, 1634, 1486, 1376, 1296, 1254, 1269, 1325, 1422, 1547, 1722, 1922, 2106, 2297, 2367, 2146, 1960, 1736, 1550, 1402, 1281, 1196, 1157, 1169, 1230, 1333, 1466, 1640, 1848, 2055, 2232, 2320, 2105, 1909, 1675, 1489, 1335, 1208, 1120, 1083, 1099, 1158, 1265, 1405, 1575, 1794, 2006, 2206, 2295, 2075, 1873, 1634, 1447, 1292, 1162, 1076, 1037, 1052, 1113, 1220, 1363, 1541, 1748, 1982, 2173, 2278, 2071, 1850, 1619, 1430, 1271, 1144, 1056, 1024, 1035, 1096, 1202, 1348, 1521, 1736, 1966, 2162, 2290, 2073, 1856, 1626, 1439, 1279, 1150, 1065, 1029, 1043, 1104, 1211, 1355, 1532, 1744, 1973, 2166, 2302, 2090, 1883, 1651, 1466, 1313, 1184, 1100, 1065, 1078, 1139, 1246, 1388, 1557, 1771, 1995, 2185, 2344, 2122, 1927, 1706, 1513, 1368, 1245, 1163, 1126, 1140, 1200, 1305, 1441, 1612, 1823, 2030, 2225, 2411, 2166, 1983, 1776, 1584, 1439, 1324, 1245, 1213, 1225, 1283, 1383, 1513, 1688, 1887, 2074, 2281, 2493, 2226, 2042, 1867, 1679, 1535, 1418, 1349, 1317, 1329, 1382, 1476, 1607, 1780, 1968, 2128, 2376, 2613, 2305, 2120, 1955, 1797, 1642, 1536, 1460, 1430, 1446, 1496, 1591, 1722, 1887, 2029, 2217, 2500, 2745, 2434, 2202, 2052, 1917, 1784, 1676, 1603, 1572, 1584, 1634, 1731, 1857, 1986, 2128, 2326, 2675, 3059, 2546, 2342, 2153, 2041, 1930, 1833, 1767, 1731, 1739, 1795, 1880, 1970, 2091, 2242, 2528, 2816, ]
+ b: [2873, 2460, 2268, 2104, 2011, 1921, 1837, 1775, 1753, 1759, 1798, 1871, 1956, 2059, 2172, 2375, 2631, 2606, 2309, 2117, 1990, 1879, 1768, 1673, 1606, 1582, 1588, 1633, 1705, 1820, 1931, 2051, 2202, 2475, 2458, 2204, 2033, 1901, 1760, 1630, 1533, 1475, 1452, 1455, 1495, 1572, 1694, 1839, 1962, 2110, 2332, 2361, 2122, 1964, 1800, 1640, 1506, 1417, 1362, 1332, 1340, 1378, 1452, 1573, 1727, 1887, 2031, 2222, 2280, 2053, 1893, 1713, 1542, 1414, 1321, 1257, 1229, 1235, 1282, 1365, 1470, 1633, 1804, 1974, 2144, 2220, 2010, 1846, 1638, 1472, 1340, 1238, 1168, 1141, 1149, 1201, 1288, 1403, 1551, 1742, 1923, 2094, 2180, 1986, 1797, 1591, 1416, 1287, 1176, 1105, 1077, 1088, 1137, 1230, 1350, 1502, 1688, 1885, 2062, 2161, 1955, 1767, 1554, 1387, 1249, 1135, 1064, 1035, 1050, 1097, 1191, 1317, 1471, 1654, 1863, 2027, 2145, 1955, 1757, 1539, 1375, 1233, 1121, 1047, 1024, 1033, 1086, 1175, 1303, 1454, 1640, 1848, 2020, 2154, 1953, 1760, 1542, 1379, 1237, 1124, 1053, 1027, 1038, 1089, 1182, 1310, 1463, 1645, 1848, 2028, 2167, 1965, 1781, 1567, 1400, 1266, 1152, 1083, 1054, 1066, 1117, 1209, 1334, 1483, 1674, 1867, 2043, 2207, 1995, 1816, 1613, 1440, 1311, 1204, 1137, 1109, 1118, 1169, 1258, 1378, 1527, 1713, 1899, 2067, 2247, 2035, 1862, 1676, 1500, 1369, 1274, 1208, 1182, 1190, 1237, 1324, 1439, 1592, 1770, 1930, 2126, 2337, 2085, 1919, 1752, 1585, 1447, 1353, 1294, 1270, 1278, 1325, 1401, 1517, 1672, 1842, 1979, 2199, 2421, 2154, 1984, 1835, 1686, 1549, 1450, 1393, 1369, 1381, 1418, 1500, 1617, 1769, 1886, 2055, 2310, 2539, 2273, 2056, 1921, 1791, 1680, 1576, 1515, 1490, 1499, 1544, 1624, 1737, 1860, 1983, 2162, 2458, 2817, 2386, 2185, 2018, 1904, 1802, 1724, 1668, 1638, 1646, 1685, 1765, 1851, 1953, 2089, 2342, 2607, ]
+ #3264x2448_D75_70 - D75
+ - ct: 7504
+ resolution: 3264x2448
+ r: [4095, 3519, 3218, 2985, 2815, 2645, 2509, 2389, 2327, 2355, 2435, 2555, 2710, 2908, 3107, 3455, 3909, 3739, 3284, 3001, 2795, 2603, 2392, 2213, 2093, 2049, 2058, 2135, 2281, 2493, 2685, 2920, 3163, 3650, 3536, 3113, 2865, 2641, 2393, 2149, 1967, 1852, 1802, 1811, 1894, 2037, 2267, 2525, 2747, 3014, 3388, 3358, 2983, 2730, 2466, 2185, 1933, 1755, 1634, 1579, 1590, 1678, 1826, 2049, 2329, 2621, 2864, 3207, 3196, 2870, 2628, 2311, 2001, 1757, 1569, 1439, 1382, 1396, 1488, 1645, 1865, 2163, 2477, 2773, 3063, 3115, 2785, 2512, 2175, 1859, 1619, 1412, 1285, 1228, 1243, 1335, 1502, 1726, 2015, 2362, 2666, 2951, 3027, 2733, 2430, 2073, 1761, 1507, 1303, 1172, 1116, 1132, 1223, 1388, 1622, 1913, 2253, 2591, 2908, 2995, 2683, 2368, 2007, 1696, 1435, 1234, 1104, 1045, 1068, 1154, 1317, 1561, 1846, 2189, 2547, 2845, 2960, 2670, 2344, 1972, 1667, 1403, 1205, 1074, 1024, 1038, 1128, 1290, 1526, 1816, 2166, 2519, 2841, 2985, 2665, 2355, 1980, 1675, 1416, 1210, 1087, 1032, 1052, 1141, 1300, 1537, 1836, 2171, 2530, 2837, 3017, 2686, 2380, 2030, 1721, 1465, 1264, 1140, 1086, 1104, 1190, 1358, 1586, 1879, 2221, 2556, 2871, 3062, 2738, 2456, 2107, 1796, 1549, 1356, 1232, 1175, 1192, 1285, 1446, 1672, 1961, 2298, 2626, 2926, 3172, 2807, 2533, 2227, 1916, 1670, 1485, 1356, 1308, 1325, 1415, 1577, 1801, 2085, 2411, 2676, 3033, 3272, 2904, 2640, 2360, 2069, 1821, 1639, 1525, 1476, 1492, 1580, 1735, 1951, 2232, 2536, 2784, 3143, 3481, 3014, 2752, 2511, 2256, 2018, 1835, 1719, 1672, 1687, 1777, 1931, 2151, 2414, 2647, 2922, 3369, 3652, 3193, 2877, 2650, 2441, 2239, 2058, 1946, 1895, 1918, 1999, 2153, 2365, 2572, 2794, 3086, 3594, 4095, 3408, 3097, 2824, 2643, 2469, 2323, 2215, 2158, 2187, 2264, 2412, 2554, 2742, 2991, 3425, 3869, ]
+ gr: [3118, 2636, 2433, 2254, 2141, 2035, 1950, 1873, 1840, 1849, 1893, 1975, 2079, 2175, 2303, 2544, 2821, 2787, 2475, 2277, 2131, 2003, 1880, 1767, 1691, 1656, 1665, 1715, 1794, 1921, 2037, 2179, 2343, 2648, 2644, 2359, 2180, 2024, 1877, 1724, 1615, 1543, 1508, 1516, 1561, 1650, 1780, 1935, 2071, 2236, 2483, 2533, 2271, 2094, 1926, 1742, 1593, 1487, 1413, 1377, 1385, 1434, 1520, 1647, 1819, 1984, 2150, 2358, 2451, 2197, 2027, 1823, 1635, 1491, 1375, 1296, 1258, 1268, 1324, 1417, 1538, 1712, 1905, 2087, 2270, 2374, 2145, 1961, 1741, 1549, 1402, 1281, 1196, 1159, 1169, 1227, 1325, 1458, 1624, 1834, 2028, 2212, 2324, 2109, 1912, 1678, 1487, 1335, 1208, 1123, 1087, 1096, 1155, 1260, 1394, 1560, 1769, 1981, 2168, 2302, 2071, 1872, 1633, 1447, 1290, 1159, 1076, 1038, 1052, 1109, 1211, 1356, 1521, 1728, 1954, 2134, 2285, 2065, 1850, 1617, 1427, 1269, 1142, 1054, 1024, 1033, 1090, 1194, 1333, 1502, 1714, 1936, 2128, 2281, 2075, 1855, 1621, 1435, 1277, 1146, 1064, 1030, 1042, 1100, 1203, 1341, 1513, 1721, 1948, 2122, 2312, 2076, 1880, 1647, 1463, 1308, 1180, 1099, 1064, 1075, 1132, 1237, 1375, 1539, 1746, 1961, 2151, 2345, 2115, 1924, 1700, 1514, 1361, 1244, 1160, 1126, 1137, 1194, 1298, 1427, 1592, 1802, 2001, 2181, 2409, 2156, 1978, 1774, 1578, 1435, 1320, 1242, 1211, 1221, 1276, 1372, 1498, 1668, 1864, 2047, 2237, 2494, 2218, 2033, 1858, 1672, 1520, 1415, 1343, 1311, 1324, 1376, 1462, 1590, 1758, 1940, 2097, 2340, 2607, 2290, 2110, 1945, 1786, 1638, 1526, 1455, 1425, 1437, 1485, 1578, 1705, 1868, 1998, 2185, 2460, 2727, 2419, 2192, 2039, 1906, 1775, 1666, 1593, 1565, 1576, 1627, 1711, 1838, 1963, 2101, 2299, 2626, 3040, 2538, 2330, 2138, 2021, 1918, 1827, 1755, 1724, 1732, 1784, 1866, 1954, 2068, 2214, 2496, 2760, ]
+ gb: [3103, 2631, 2429, 2258, 2149, 2044, 1949, 1878, 1843, 1853, 1904, 1985, 2081, 2188, 2320, 2563, 2842, 2787, 2459, 2271, 2124, 2008, 1878, 1772, 1689, 1663, 1666, 1715, 1801, 1924, 2045, 2190, 2357, 2679, 2626, 2355, 2170, 2027, 1869, 1724, 1617, 1543, 1507, 1517, 1566, 1653, 1785, 1945, 2080, 2250, 2509, 2516, 2256, 2083, 1920, 1737, 1595, 1485, 1413, 1376, 1385, 1438, 1526, 1654, 1826, 1997, 2161, 2383, 2426, 2190, 2013, 1820, 1629, 1486, 1374, 1294, 1255, 1266, 1325, 1419, 1543, 1721, 1918, 2103, 2291, 2358, 2142, 1954, 1731, 1545, 1400, 1280, 1194, 1157, 1171, 1227, 1334, 1465, 1633, 1848, 2045, 2227, 2319, 2095, 1902, 1672, 1488, 1334, 1207, 1123, 1085, 1096, 1157, 1261, 1401, 1572, 1784, 2003, 2191, 2286, 2071, 1863, 1631, 1445, 1289, 1160, 1075, 1038, 1053, 1113, 1221, 1363, 1534, 1743, 1971, 2167, 2278, 2059, 1844, 1613, 1427, 1271, 1143, 1057, 1024, 1035, 1096, 1199, 1346, 1518, 1731, 1960, 2153, 2280, 2065, 1853, 1619, 1438, 1278, 1149, 1066, 1029, 1044, 1105, 1210, 1354, 1528, 1735, 1970, 2160, 2302, 2080, 1875, 1649, 1465, 1309, 1183, 1100, 1065, 1079, 1136, 1246, 1384, 1556, 1767, 1987, 2178, 2346, 2109, 1923, 1697, 1514, 1365, 1245, 1160, 1127, 1141, 1199, 1303, 1438, 1608, 1818, 2027, 2215, 2410, 2158, 1976, 1774, 1578, 1437, 1325, 1245, 1212, 1225, 1284, 1379, 1514, 1680, 1883, 2068, 2272, 2489, 2219, 2041, 1862, 1677, 1529, 1417, 1345, 1314, 1327, 1381, 1474, 1600, 1780, 1961, 2120, 2371, 2601, 2306, 2111, 1953, 1795, 1642, 1534, 1459, 1431, 1443, 1496, 1587, 1717, 1881, 2024, 2213, 2482, 2733, 2436, 2194, 2049, 1910, 1784, 1674, 1600, 1567, 1581, 1632, 1728, 1855, 1985, 2122, 2321, 2675, 3032, 2542, 2344, 2151, 2037, 1930, 1834, 1767, 1732, 1747, 1791, 1879, 1968, 2083, 2239, 2522, 2807, ]
+ b: [2879, 2455, 2264, 2106, 2006, 1922, 1836, 1777, 1750, 1753, 1802, 1870, 1949, 2055, 2160, 2385, 2620, 2609, 2309, 2119, 1990, 1882, 1764, 1668, 1603, 1583, 1586, 1625, 1704, 1818, 1933, 2054, 2201, 2478, 2465, 2208, 2038, 1897, 1760, 1627, 1531, 1477, 1450, 1453, 1492, 1569, 1686, 1838, 1960, 2103, 2342, 2362, 2116, 1967, 1802, 1637, 1506, 1416, 1359, 1332, 1340, 1379, 1453, 1574, 1722, 1888, 2030, 2214, 2284, 2053, 1896, 1715, 1540, 1412, 1320, 1257, 1227, 1236, 1282, 1363, 1468, 1629, 1806, 1969, 2149, 2217, 2010, 1841, 1638, 1470, 1340, 1237, 1168, 1140, 1146, 1199, 1286, 1401, 1552, 1740, 1932, 2082, 2182, 1981, 1791, 1589, 1418, 1287, 1175, 1104, 1076, 1087, 1137, 1227, 1352, 1497, 1690, 1883, 2059, 2158, 1964, 1767, 1551, 1387, 1247, 1135, 1065, 1036, 1048, 1100, 1190, 1318, 1466, 1651, 1858, 2037, 2149, 1951, 1756, 1539, 1373, 1233, 1121, 1047, 1024, 1035, 1085, 1174, 1302, 1457, 1637, 1845, 2021, 2153, 1952, 1760, 1542, 1378, 1236, 1126, 1054, 1026, 1040, 1090, 1181, 1308, 1458, 1645, 1852, 2025, 2172, 1964, 1780, 1565, 1398, 1266, 1151, 1085, 1055, 1066, 1116, 1209, 1333, 1484, 1667, 1864, 2036, 2200, 1989, 1822, 1612, 1435, 1311, 1202, 1135, 1108, 1117, 1169, 1259, 1374, 1526, 1714, 1895, 2075, 2259, 2034, 1860, 1674, 1500, 1363, 1275, 1208, 1180, 1192, 1237, 1319, 1437, 1591, 1767, 1932, 2119, 2327, 2081, 1914, 1750, 1580, 1445, 1350, 1292, 1269, 1279, 1320, 1400, 1515, 1671, 1835, 1975, 2198, 2428, 2152, 1983, 1838, 1684, 1546, 1448, 1394, 1367, 1377, 1417, 1501, 1615, 1768, 1890, 2056, 2310, 2536, 2273, 2059, 1919, 1794, 1676, 1576, 1512, 1487, 1499, 1543, 1621, 1741, 1856, 1980, 2155, 2463, 2820, 2387, 2189, 2014, 1906, 1806, 1722, 1672, 1639, 1645, 1687, 1758, 1846, 1950, 2094, 2345, 2609, ]
+ #3264x2448_F11_TL84_70 - F11_TL84
+ - ct: 4000
+ resolution: 3264x2448
+ r: [4002, 3309, 3035, 2794, 2634, 2461, 2319, 2207, 2157, 2168, 2244, 2370, 2537, 2712, 2917, 3269, 3672, 3551, 3103, 2825, 2625, 2420, 2214, 2037, 1922, 1874, 1882, 1956, 2100, 2302, 2511, 2738, 2969, 3444, 3298, 2949, 2692, 2463, 2213, 1969, 1792, 1686, 1640, 1646, 1721, 1857, 2074, 2333, 2576, 2831, 3187, 3157, 2805, 2562, 2298, 1998, 1762, 1596, 1491, 1444, 1454, 1521, 1655, 1863, 2142, 2432, 2691, 3014, 3030, 2709, 2454, 2128, 1831, 1597, 1435, 1335, 1291, 1302, 1366, 1495, 1686, 1971, 2291, 2593, 2883, 2940, 2627, 2345, 1995, 1701, 1475, 1311, 1216, 1176, 1186, 1246, 1372, 1564, 1831, 2173, 2490, 2788, 2868, 2575, 2259, 1900, 1604, 1387, 1231, 1136, 1095, 1105, 1167, 1286, 1475, 1735, 2074, 2418, 2721, 2826, 2533, 2203, 1835, 1548, 1332, 1177, 1084, 1042, 1056, 1116, 1233, 1422, 1676, 2015, 2370, 2679, 2812, 2511, 2176, 1810, 1521, 1303, 1157, 1063, 1024, 1034, 1095, 1216, 1398, 1657, 1989, 2342, 2677, 2816, 2517, 2185, 1816, 1530, 1312, 1161, 1070, 1031, 1041, 1109, 1224, 1410, 1665, 1999, 2359, 2664, 2839, 2531, 2218, 1856, 1571, 1350, 1197, 1106, 1065, 1080, 1142, 1263, 1451, 1708, 2046, 2389, 2703, 2896, 2578, 2281, 1935, 1636, 1421, 1265, 1171, 1135, 1147, 1209, 1335, 1527, 1788, 2123, 2454, 2753, 2994, 2638, 2366, 2046, 1749, 1522, 1365, 1268, 1231, 1245, 1310, 1442, 1638, 1912, 2230, 2518, 2840, 3101, 2741, 2467, 2183, 1895, 1664, 1502, 1402, 1363, 1376, 1451, 1582, 1789, 2057, 2362, 2609, 2977, 3260, 2841, 2581, 2342, 2083, 1842, 1676, 1575, 1534, 1553, 1625, 1769, 1977, 2240, 2474, 2752, 3175, 3489, 3019, 2716, 2496, 2274, 2077, 1899, 1789, 1751, 1769, 1847, 1991, 2189, 2409, 2631, 2927, 3411, 3949, 3229, 2910, 2647, 2477, 2296, 2156, 2049, 2010, 2022, 2104, 2237, 2398, 2579, 2812, 3226, 3666, ]
+ gr: [3132, 2654, 2457, 2283, 2168, 2064, 1974, 1892, 1855, 1864, 1922, 1997, 2100, 2202, 2331, 2576, 2861, 2822, 2487, 2297, 2143, 2021, 1891, 1780, 1697, 1664, 1669, 1720, 1809, 1934, 2058, 2197, 2364, 2674, 2652, 2374, 2189, 2039, 1882, 1732, 1618, 1541, 1502, 1512, 1561, 1654, 1788, 1943, 2081, 2250, 2503, 2542, 2272, 2100, 1925, 1743, 1592, 1482, 1408, 1367, 1378, 1429, 1517, 1644, 1816, 1993, 2163, 2364, 2454, 2203, 2028, 1824, 1624, 1481, 1366, 1286, 1249, 1256, 1312, 1409, 1527, 1709, 1905, 2097, 2279, 2368, 2158, 1956, 1731, 1540, 1390, 1275, 1189, 1153, 1165, 1219, 1318, 1446, 1615, 1833, 2032, 2220, 2332, 2110, 1908, 1667, 1473, 1322, 1200, 1119, 1085, 1095, 1149, 1249, 1383, 1550, 1760, 1983, 2175, 2300, 2074, 1859, 1619, 1428, 1273, 1154, 1072, 1038, 1052, 1105, 1203, 1339, 1506, 1722, 1951, 2146, 2289, 2061, 1844, 1602, 1410, 1256, 1134, 1053, 1024, 1031, 1089, 1183, 1320, 1490, 1702, 1938, 2137, 2282, 2067, 1845, 1605, 1418, 1260, 1141, 1061, 1027, 1041, 1095, 1194, 1328, 1497, 1713, 1942, 2139, 2318, 2083, 1870, 1634, 1448, 1296, 1173, 1096, 1062, 1073, 1129, 1226, 1363, 1528, 1741, 1967, 2157, 2345, 2113, 1918, 1691, 1495, 1351, 1233, 1154, 1119, 1132, 1189, 1286, 1418, 1583, 1795, 2001, 2190, 2416, 2159, 1976, 1767, 1568, 1424, 1311, 1232, 1202, 1211, 1268, 1363, 1490, 1661, 1868, 2047, 2256, 2502, 2222, 2037, 1855, 1670, 1518, 1407, 1333, 1302, 1313, 1369, 1457, 1591, 1756, 1941, 2106, 2352, 2619, 2304, 2118, 1948, 1789, 1638, 1523, 1449, 1418, 1432, 1483, 1578, 1706, 1875, 2011, 2197, 2473, 2758, 2433, 2198, 2052, 1915, 1783, 1674, 1593, 1566, 1576, 1629, 1721, 1852, 1976, 2115, 2312, 2657, 3071, 2569, 2344, 2154, 2039, 1930, 1841, 1773, 1734, 1748, 1795, 1881, 1974, 2089, 2231, 2521, 2802, ]
+ gb: [3133, 2656, 2457, 2275, 2154, 2053, 1951, 1877, 1838, 1848, 1901, 1985, 2088, 2205, 2345, 2598, 2891, 2824, 2492, 2292, 2135, 2015, 1879, 1765, 1681, 1647, 1653, 1708, 1800, 1928, 2056, 2208, 2384, 2708, 2667, 2381, 2198, 2039, 1879, 1723, 1610, 1527, 1492, 1502, 1553, 1645, 1781, 1953, 2093, 2277, 2545, 2558, 2287, 2108, 1931, 1743, 1586, 1472, 1400, 1359, 1367, 1424, 1513, 1652, 1830, 2012, 2188, 2417, 2474, 2212, 2042, 1831, 1630, 1477, 1365, 1283, 1242, 1255, 1313, 1408, 1538, 1723, 1930, 2127, 2323, 2395, 2169, 1970, 1738, 1548, 1392, 1272, 1187, 1151, 1161, 1222, 1322, 1459, 1633, 1861, 2066, 2263, 2356, 2130, 1922, 1679, 1479, 1325, 1200, 1118, 1082, 1094, 1151, 1254, 1396, 1573, 1792, 2024, 2227, 2337, 2095, 1883, 1627, 1438, 1279, 1156, 1074, 1038, 1054, 1110, 1211, 1352, 1530, 1752, 1997, 2195, 2306, 2095, 1861, 1616, 1421, 1258, 1139, 1055, 1024, 1035, 1094, 1193, 1335, 1513, 1741, 1986, 2182, 2315, 2094, 1867, 1622, 1427, 1266, 1143, 1064, 1029, 1044, 1100, 1202, 1344, 1523, 1746, 1989, 2193, 2342, 2108, 1890, 1648, 1458, 1299, 1176, 1096, 1061, 1075, 1132, 1236, 1376, 1557, 1773, 2010, 2203, 2377, 2140, 1939, 1704, 1508, 1353, 1232, 1154, 1120, 1131, 1193, 1292, 1432, 1608, 1828, 2044, 2251, 2443, 2185, 1992, 1782, 1577, 1428, 1315, 1233, 1199, 1214, 1271, 1370, 1504, 1685, 1895, 2093, 2305, 2519, 2249, 2058, 1869, 1675, 1519, 1406, 1331, 1298, 1313, 1371, 1462, 1599, 1781, 1976, 2139, 2405, 2637, 2326, 2130, 1962, 1792, 1637, 1521, 1445, 1412, 1428, 1481, 1578, 1713, 1888, 2035, 2238, 2529, 2777, 2458, 2215, 2053, 1917, 1776, 1662, 1588, 1554, 1568, 1624, 1722, 1851, 1992, 2136, 2351, 2708, 3076, 2575, 2354, 2161, 2036, 1925, 1834, 1757, 1723, 1732, 1779, 1874, 1972, 2093, 2258, 2546, 2857, ]
+ b: [2906, 2483, 2290, 2108, 2020, 1921, 1851, 1778, 1756, 1759, 1799, 1880, 1969, 2074, 2183, 2435, 2664, 2618, 2324, 2122, 1992, 1883, 1772, 1666, 1601, 1578, 1586, 1627, 1712, 1827, 1934, 2072, 2225, 2524, 2483, 2211, 2037, 1900, 1761, 1625, 1532, 1472, 1447, 1449, 1486, 1571, 1692, 1847, 1968, 2118, 2360, 2370, 2126, 1961, 1803, 1638, 1509, 1411, 1355, 1324, 1335, 1376, 1449, 1572, 1729, 1884, 2042, 2233, 2286, 2051, 1902, 1710, 1537, 1407, 1314, 1249, 1222, 1228, 1276, 1356, 1472, 1629, 1815, 1975, 2159, 2238, 2012, 1839, 1636, 1463, 1333, 1232, 1165, 1137, 1144, 1192, 1280, 1394, 1549, 1743, 1922, 2094, 2184, 1979, 1797, 1586, 1413, 1279, 1170, 1102, 1074, 1086, 1134, 1219, 1345, 1492, 1684, 1888, 2067, 2160, 1958, 1765, 1546, 1378, 1240, 1132, 1062, 1035, 1050, 1095, 1184, 1307, 1459, 1646, 1858, 2036, 2151, 1954, 1752, 1531, 1366, 1224, 1115, 1046, 1026, 1033, 1081, 1170, 1293, 1450, 1635, 1845, 2032, 2155, 1948, 1754, 1535, 1373, 1228, 1118, 1053, 1024, 1038, 1088, 1175, 1299, 1452, 1638, 1849, 2027, 2179, 1970, 1780, 1565, 1391, 1259, 1147, 1079, 1053, 1063, 1113, 1203, 1324, 1474, 1668, 1869, 2037, 2214, 1989, 1816, 1610, 1433, 1297, 1194, 1130, 1105, 1112, 1161, 1249, 1367, 1522, 1710, 1892, 2074, 2264, 2034, 1863, 1673, 1491, 1360, 1264, 1199, 1176, 1185, 1230, 1312, 1434, 1590, 1770, 1936, 2127, 2348, 2084, 1916, 1751, 1581, 1437, 1343, 1284, 1254, 1268, 1312, 1395, 1516, 1673, 1837, 1986, 2216, 2445, 2159, 1975, 1832, 1684, 1544, 1441, 1381, 1358, 1367, 1413, 1494, 1612, 1773, 1894, 2067, 2330, 2573, 2285, 2061, 1914, 1791, 1672, 1568, 1507, 1480, 1492, 1529, 1619, 1743, 1862, 1987, 2168, 2475, 2853, 2395, 2197, 2003, 1909, 1798, 1726, 1652, 1638, 1640, 1687, 1762, 1852, 1956, 2101, 2365, 2643, ]
+ #3264x2448_F2_CWF_70 - F2_CWF
+ - ct: 4230
+ resolution: 3264x2448
+ r: [3695, 3077, 2822, 2622, 2472, 2342, 2200, 2111, 2075, 2079, 2145, 2258, 2393, 2547, 2713, 3030, 3396, 3294, 2882, 2641, 2461, 2294, 2117, 1965, 1868, 1822, 1827, 1898, 2020, 2200, 2366, 2557, 2763, 3190, 3081, 2755, 2527, 2334, 2120, 1915, 1760, 1667, 1625, 1635, 1702, 1820, 2002, 2225, 2422, 2641, 2979, 2935, 2624, 2415, 2192, 1939, 1732, 1587, 1496, 1452, 1461, 1526, 1643, 1825, 2064, 2314, 2518, 2804, 2832, 2532, 2323, 2050, 1792, 1591, 1448, 1348, 1301, 1315, 1382, 1504, 1675, 1916, 2190, 2435, 2700, 2735, 2464, 2229, 1935, 1680, 1485, 1327, 1227, 1183, 1194, 1265, 1392, 1567, 1799, 2091, 2351, 2611, 2673, 2415, 2150, 1853, 1597, 1397, 1244, 1144, 1096, 1111, 1182, 1308, 1489, 1715, 2000, 2291, 2552, 2638, 2381, 2104, 1797, 1546, 1342, 1189, 1086, 1042, 1058, 1126, 1255, 1435, 1666, 1950, 2257, 2514, 2621, 2361, 2083, 1766, 1525, 1319, 1164, 1064, 1024, 1037, 1106, 1231, 1415, 1644, 1929, 2233, 2506, 2638, 2364, 2088, 1777, 1528, 1326, 1168, 1073, 1029, 1046, 1115, 1240, 1422, 1654, 1941, 2237, 2511, 2655, 2388, 2121, 1813, 1563, 1366, 1210, 1114, 1070, 1084, 1155, 1283, 1459, 1693, 1981, 2269, 2530, 2712, 2427, 2182, 1884, 1628, 1428, 1281, 1183, 1143, 1158, 1226, 1352, 1531, 1764, 2046, 2317, 2579, 2790, 2485, 2250, 1983, 1722, 1523, 1379, 1284, 1242, 1258, 1327, 1454, 1628, 1862, 2139, 2376, 2667, 2895, 2571, 2344, 2103, 1851, 1644, 1506, 1409, 1371, 1388, 1457, 1578, 1756, 1996, 2250, 2457, 2782, 3048, 2672, 2441, 2229, 2007, 1806, 1658, 1567, 1526, 1541, 1611, 1739, 1916, 2148, 2340, 2583, 2953, 3225, 2827, 2544, 2353, 2172, 1998, 1846, 1755, 1708, 1732, 1794, 1928, 2102, 2282, 2468, 2726, 3175, 3641, 3010, 2734, 2492, 2341, 2192, 2069, 1968, 1937, 1948, 2023, 2139, 2270, 2437, 2634, 2994, 3392, ]
+ gr: [3050, 2599, 2407, 2232, 2134, 2044, 1950, 1879, 1843, 1845, 1897, 1973, 2069, 2164, 2285, 2518, 2788, 2763, 2436, 2247, 2112, 1994, 1867, 1764, 1688, 1655, 1661, 1710, 1788, 1907, 2024, 2157, 2320, 2612, 2604, 2323, 2155, 2009, 1858, 1715, 1606, 1543, 1504, 1512, 1556, 1640, 1766, 1917, 2047, 2211, 2450, 2492, 2232, 2067, 1906, 1727, 1584, 1480, 1411, 1371, 1381, 1428, 1512, 1632, 1799, 1962, 2124, 2327, 2400, 2164, 1999, 1801, 1617, 1475, 1369, 1292, 1252, 1264, 1317, 1408, 1525, 1691, 1879, 2063, 2240, 2326, 2120, 1935, 1721, 1533, 1392, 1278, 1194, 1156, 1167, 1225, 1319, 1443, 1606, 1809, 2003, 2170, 2291, 2075, 1883, 1653, 1470, 1323, 1204, 1122, 1086, 1096, 1153, 1252, 1381, 1540, 1746, 1951, 2139, 2256, 2043, 1839, 1609, 1430, 1278, 1158, 1076, 1038, 1052, 1108, 1206, 1341, 1500, 1702, 1929, 2103, 2242, 2036, 1820, 1596, 1411, 1260, 1138, 1053, 1024, 1032, 1091, 1186, 1322, 1484, 1690, 1909, 2098, 2251, 2034, 1826, 1598, 1416, 1267, 1143, 1065, 1027, 1043, 1097, 1198, 1328, 1493, 1694, 1913, 2096, 2263, 2048, 1852, 1626, 1447, 1298, 1177, 1096, 1063, 1075, 1131, 1230, 1360, 1521, 1723, 1934, 2117, 2316, 2078, 1897, 1680, 1494, 1351, 1238, 1159, 1123, 1135, 1193, 1290, 1416, 1572, 1776, 1974, 2152, 2362, 2122, 1947, 1746, 1562, 1424, 1313, 1238, 1207, 1218, 1272, 1361, 1484, 1647, 1838, 2014, 2215, 2461, 2182, 2007, 1835, 1653, 1510, 1408, 1336, 1305, 1317, 1368, 1456, 1576, 1736, 1919, 2068, 2306, 2560, 2260, 2080, 1920, 1771, 1626, 1516, 1450, 1420, 1432, 1480, 1566, 1687, 1844, 1975, 2157, 2418, 2703, 2387, 2160, 2012, 1888, 1763, 1660, 1588, 1558, 1566, 1617, 1702, 1827, 1943, 2075, 2267, 2603, 2992, 2511, 2296, 2118, 2001, 1898, 1817, 1749, 1719, 1730, 1779, 1859, 1938, 2050, 2187, 2457, 2741, ]
+ gb: [3060, 2612, 2398, 2229, 2123, 2030, 1932, 1857, 1822, 1830, 1874, 1957, 2069, 2163, 2291, 2542, 2825, 2776, 2432, 2251, 2106, 1988, 1856, 1748, 1668, 1636, 1641, 1695, 1784, 1902, 2026, 2170, 2338, 2654, 2609, 2336, 2151, 2005, 1853, 1710, 1597, 1527, 1487, 1500, 1546, 1634, 1768, 1926, 2063, 2235, 2497, 2514, 2248, 2075, 1908, 1727, 1578, 1471, 1396, 1360, 1371, 1422, 1509, 1639, 1810, 1981, 2151, 2365, 2415, 2182, 2010, 1807, 1619, 1474, 1366, 1284, 1247, 1257, 1316, 1409, 1532, 1710, 1906, 2098, 2282, 2358, 2140, 1949, 1725, 1539, 1393, 1276, 1191, 1153, 1166, 1224, 1325, 1455, 1628, 1840, 2045, 2226, 2308, 2101, 1903, 1666, 1479, 1329, 1204, 1121, 1083, 1098, 1154, 1260, 1395, 1565, 1775, 2000, 2191, 2296, 2069, 1863, 1625, 1437, 1285, 1160, 1074, 1038, 1053, 1112, 1214, 1355, 1527, 1746, 1970, 2167, 2280, 2060, 1844, 1609, 1422, 1262, 1140, 1055, 1024, 1034, 1095, 1198, 1337, 1516, 1724, 1962, 2155, 2284, 2063, 1850, 1618, 1429, 1273, 1147, 1064, 1030, 1043, 1104, 1207, 1351, 1519, 1738, 1965, 2159, 2303, 2083, 1878, 1640, 1460, 1304, 1182, 1099, 1065, 1078, 1136, 1244, 1379, 1552, 1764, 1986, 2181, 2341, 2110, 1916, 1698, 1504, 1359, 1238, 1159, 1125, 1136, 1197, 1297, 1431, 1599, 1809, 2018, 2208, 2403, 2156, 1967, 1764, 1570, 1427, 1315, 1237, 1205, 1217, 1274, 1369, 1502, 1673, 1875, 2061, 2278, 2488, 2208, 2025, 1848, 1662, 1513, 1405, 1333, 1304, 1314, 1372, 1460, 1588, 1760, 1946, 2108, 2355, 2596, 2289, 2101, 1934, 1775, 1624, 1516, 1442, 1412, 1425, 1476, 1571, 1700, 1865, 2005, 2195, 2486, 2720, 2411, 2169, 2025, 1895, 1760, 1650, 1578, 1548, 1559, 1612, 1702, 1834, 1960, 2101, 2302, 2647, 3035, 2523, 2314, 2125, 2002, 1897, 1806, 1738, 1705, 1716, 1766, 1855, 1944, 2061, 2204, 2497, 2792, ]
+ b: [2861, 2421, 2239, 2078, 1980, 1893, 1811, 1762, 1723, 1742, 1779, 1851, 1933, 2034, 2151, 2359, 2635, 2562, 2279, 2088, 1949, 1859, 1748, 1650, 1585, 1562, 1570, 1607, 1691, 1798, 1909, 2028, 2181, 2467, 2428, 2166, 2009, 1873, 1736, 1613, 1518, 1461, 1436, 1441, 1480, 1557, 1676, 1814, 1932, 2087, 2311, 2326, 2088, 1923, 1779, 1621, 1492, 1404, 1351, 1322, 1329, 1368, 1445, 1557, 1708, 1863, 2004, 2200, 2250, 2013, 1869, 1687, 1522, 1398, 1309, 1250, 1218, 1231, 1273, 1354, 1457, 1615, 1779, 1941, 2113, 2187, 1979, 1812, 1617, 1454, 1331, 1231, 1163, 1137, 1145, 1195, 1277, 1392, 1537, 1720, 1899, 2061, 2161, 1947, 1769, 1567, 1405, 1273, 1171, 1101, 1078, 1087, 1132, 1222, 1336, 1483, 1665, 1849, 2018, 2122, 1923, 1740, 1530, 1369, 1239, 1131, 1064, 1037, 1049, 1096, 1182, 1306, 1452, 1625, 1829, 1999, 2115, 1919, 1730, 1520, 1360, 1222, 1117, 1046, 1024, 1033, 1086, 1169, 1288, 1439, 1617, 1815, 1991, 2121, 1918, 1736, 1524, 1359, 1227, 1119, 1053, 1025, 1040, 1088, 1173, 1295, 1442, 1624, 1817, 1995, 2136, 1934, 1750, 1546, 1384, 1254, 1147, 1079, 1053, 1063, 1114, 1203, 1321, 1464, 1649, 1837, 2004, 2179, 1955, 1795, 1587, 1423, 1294, 1195, 1131, 1105, 1112, 1161, 1247, 1362, 1506, 1688, 1872, 2037, 2228, 1999, 1833, 1656, 1480, 1353, 1263, 1197, 1172, 1182, 1228, 1311, 1423, 1574, 1751, 1903, 2078, 2309, 2047, 1889, 1724, 1558, 1425, 1336, 1277, 1252, 1263, 1308, 1382, 1500, 1654, 1806, 1954, 2164, 2390, 2114, 1949, 1802, 1660, 1524, 1429, 1373, 1352, 1360, 1401, 1482, 1597, 1748, 1863, 2031, 2287, 2520, 2231, 2019, 1882, 1760, 1651, 1549, 1494, 1466, 1478, 1519, 1597, 1715, 1827, 1947, 2124, 2444, 2788, 2355, 2157, 1974, 1878, 1770, 1701, 1637, 1615, 1612, 1661, 1743, 1824, 1925, 2064, 2315, 2599, ]
+
diff --git a/src/ipa/rkisp1/data/uncalibrated.yaml b/src/ipa/rkisp1/data/uncalibrated.yaml
index bdbd5fda..a7bbd8d8 100644
--- a/src/ipa/rkisp1/data/uncalibrated.yaml
+++ b/src/ipa/rkisp1/data/uncalibrated.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: CC0-1.0
-%YAML 1.2
+%YAML 1.1
---
version: 1
algorithms:
diff --git a/src/ipa/rkisp1/ipa_context.cpp b/src/ipa/rkisp1/ipa_context.cpp
index 1559d3ff..283bc131 100644
--- a/src/ipa/rkisp1/ipa_context.cpp
+++ b/src/ipa/rkisp1/ipa_context.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021-2022, Ideas On Board
*
- * ipa_context.cpp - RkISP1 IPA Context
+ * RkISP1 IPA Context
*/
#include "ipa_context.h"
@@ -15,6 +15,25 @@
namespace libcamera::ipa::rkisp1 {
/**
+ * \struct IPAHwSettings
+ * \brief RkISP1 version-specific hardware parameters
+ */
+
+/**
+ * \var IPAHwSettings::numAeCells
+ * \brief Number of cells in the AE exposure means grid
+ *
+ * \var IPAHwSettings::numHistogramBins
+ * \brief Number of bins in the histogram
+ *
+ * \var IPAHwSettings::numHistogramWeights
+ * \brief Number of weights in the histogram grid
+ *
+ * \var IPAHwSettings::numGammaOutSamples
+ * \brief Number of samples in the gamma out table
+ */
+
+/**
* \struct IPASessionConfiguration
* \brief Session configuration for the IPA module
*
@@ -25,84 +44,217 @@ namespace libcamera::ipa::rkisp1 {
*/
/**
- * \struct IPAFrameContext
- * \brief Per-frame context for algorithms
+ * \var IPASessionConfiguration::agc
+ * \brief AGC parameters configuration of the IPA
+ *
+ * \var IPASessionConfiguration::agc.measureWindow
+ * \brief AGC measure window
+ */
+
+/**
+ * \var IPASessionConfiguration::awb
+ * \brief AWB parameters configuration of the IPA
*
- * The frame context stores data specific to a single frame processed by the
- * IPA. Each frame processed by the IPA has a context associated with it,
- * accessible through the IPAContext structure.
+ * \var IPASessionConfiguration::awb.measureWindow
+ * \brief AWB measure window
*
- * \todo Detail how to access contexts for a particular frame
+ * \var IPASessionConfiguration::awb.enabled
+ * \brief Indicates if the AWB hardware is enabled and applies colour gains
*
- * Each of the fields in the frame context belongs to either a specific
- * algorithm, or to the top-level IPA module. A field may be read by any
- * algorithm, but should only be written by its owner.
+ * The AWB module of the ISP applies colour gains and computes statistics. It is
+ * enabled when the AWB algorithm is loaded, regardless of whether the algorithm
+ * operates in manual or automatic mode.
*/
/**
- * \struct IPAContext
- * \brief Global IPA context data shared between all algorithms
+ * \var IPASessionConfiguration::lsc
+ * \brief Lens Shading Correction configuration of the IPA
*
- * \var IPAContext::configuration
- * \brief The IPA session configuration, immutable during the session
+ * \var IPASessionConfiguration::lsc.enabled
+ * \brief Indicates if the LSC hardware is enabled
+ */
+
+/**
+ * \var IPASessionConfiguration::sensor
+ * \brief Sensor-specific configuration of the IPA
+ *
+ * \var IPASessionConfiguration::sensor.minShutterSpeed
+ * \brief Minimum shutter speed supported with the sensor
+ *
+ * \var IPASessionConfiguration::sensor.maxShutterSpeed
+ * \brief Maximum shutter speed supported with the sensor
*
- * \var IPAContext::frameContext
- * \brief The frame context for the frame being processed
+ * \var IPASessionConfiguration::sensor.minAnalogueGain
+ * \brief Minimum analogue gain supported with the sensor
*
- * \todo While the frame context is supposed to be per-frame, this
- * single frame context stores data related to both the current frame
- * and the previous frames, with fields being updated as the algorithms
- * are run. This needs to be turned into real per-frame data storage.
+ * \var IPASessionConfiguration::sensor.maxAnalogueGain
+ * \brief Maximum analogue gain supported with the sensor
+ *
+ * \var IPASessionConfiguration::sensor.defVBlank
+ * \brief The default vblank value of the sensor
+ *
+ * \var IPASessionConfiguration::sensor.lineDuration
+ * \brief Line duration in microseconds
+ *
+ * \var IPASessionConfiguration::sensor.size
+ * \brief Sensor output resolution
*/
/**
- * \var IPASessionConfiguration::agc
- * \brief AGC parameters configuration of the IPA
+ * \var IPASessionConfiguration::raw
+ * \brief Indicates if the camera is configured to capture raw frames
+ */
+
+/**
+ * \struct IPAActiveState
+ * \brief Active state for algorithms
*
- * \var IPASessionConfiguration::agc.minShutterSpeed
- * \brief Minimum shutter speed supported with the configured sensor
+ * The active state contains all algorithm-specific data that needs to be
+ * maintained by algorithms across frames. Unlike the session configuration,
+ * the active state is mutable and constantly updated by algorithms. The active
+ * state is accessible through the IPAContext structure.
*
- * \var IPASessionConfiguration::agc.maxShutterSpeed
- * \brief Maximum shutter speed supported with the configured sensor
+ * The active state stores two distinct categories of information:
*
- * \var IPASessionConfiguration::agc.minAnalogueGain
- * \brief Minimum analogue gain supported with the configured sensor
+ * - The consolidated value of all algorithm controls. Requests passed to
+ * the queueRequest() function store values for controls that the
+ * application wants to modify for that particular frame, and the
+ * queueRequest() function updates the active state with those values.
+ * The active state thus contains a consolidated view of the value of all
+ * controls handled by the algorithm.
*
- * \var IPASessionConfiguration::agc.maxAnalogueGain
- * \brief Maximum analogue gain supported with the configured sensor
+ * - The value of parameters computed by the algorithm when running in auto
+ * mode. Algorithms running in auto mode compute new parameters every
+ * time statistics buffers are received (either synchronously, or
+ * possibly in a background thread). The latest computed value of those
+ * parameters is stored in the active state in the process() function.
*
- * \var IPASessionConfiguration::agc.measureWindow
- * \brief AGC measure window
+ * Each of the members in the active state belongs to a specific algorithm. A
+ * member may be read by any algorithm, but shall only be written by its owner.
+ */
+
+/**
+ * \var IPAActiveState::agc
+ * \brief State for the Automatic Gain Control algorithm
*
- * \var IPASessionConfiguration::hw
- * \brief RkISP1-specific hardware information
+ * The exposure and gain are the latest values computed by the AGC algorithm.
*
- * \var IPASessionConfiguration::hw.revision
- * \brief Hardware revision of the ISP
+ * \var IPAActiveState::agc.exposure
+ * \brief Exposure time expressed as a number of lines
+ *
+ * \var IPAActiveState::agc.gain
+ * \brief Analogue gain multiplier
*/
/**
- * \var IPASessionConfiguration::awb
- * \brief AWB parameters configuration of the IPA
+ * \var IPAActiveState::awb
+ * \brief State for the Automatic White Balance algorithm
*
- * \var IPASessionConfiguration::awb.measureWindow
- * \brief AWB measure window
+ * \struct IPAActiveState::awb.gains
+ * \brief White balance gains
+ *
+ * \struct IPAActiveState::awb.gains.manual
+ * \brief Manual white balance gains (set through requests)
+ *
+ * \var IPAActiveState::awb.gains.manual.red
+ * \brief Manual white balance gain for R channel
+ *
+ * \var IPAActiveState::awb.gains.manual.green
+ * \brief Manual white balance gain for G channel
+ *
+ * \var IPAActiveState::awb.gains.manual.blue
+ * \brief Manual white balance gain for B channel
+ *
+ * \struct IPAActiveState::awb.gains.automatic
+ * \brief Automatic white balance gains (computed by the algorithm)
+ *
+ * \var IPAActiveState::awb.gains.automatic.red
+ * \brief Automatic white balance gain for R channel
+ *
+ * \var IPAActiveState::awb.gains.automatic.green
+ * \brief Automatic white balance gain for G channel
+ *
+ * \var IPAActiveState::awb.gains.automatic.blue
+ * \brief Automatic white balance gain for B channel
+ *
+ * \var IPAActiveState::awb.temperatureK
+ * \brief Estimated color temperature
+ *
+ * \var IPAActiveState::awb.autoEnabled
+ * \brief Whether the Auto White Balance algorithm is enabled
*/
/**
- * \var IPASessionConfiguration::sensor
- * \brief Sensor-specific configuration of the IPA
+ * \var IPAActiveState::cproc
+ * \brief State for the Color Processing algorithm
*
- * \var IPASessionConfiguration::sensor.lineDuration
- * \brief Line duration in microseconds
+ * \var IPAActiveState::cproc.brightness
+ * \brief Brightness level
+ *
+ * \var IPAActiveState::cproc.contrast
+ * \brief Contrast level
+ *
+ * \var IPAActiveState::cproc.saturation
+ * \brief Saturation level
+ */
+
+/**
+ * \var IPAActiveState::dpf
+ * \brief State for the Denoise Pre-Filter algorithm
+ *
+ * \var IPAActiveState::dpf.denoise
+ * \brief Indicates if denoise is activated
+ */
+
+/**
+ * \var IPAActiveState::filter
+ * \brief State for the Filter algorithm
+ *
+ * \var IPAActiveState::filter.denoise
+ * \brief Denoising level
+ *
+ * \var IPAActiveState::filter.sharpness
+ * \brief Sharpness level
+ */
+
+/**
+ * \struct IPAFrameContext
+ * \brief Per-frame context for algorithms
+ *
+ * The frame context stores two distinct categories of information:
+ *
+ * - The value of the controls to be applied to the frame. These values are
+ * typically set in the queueRequest() function, from the consolidated
+ * control values stored in the active state. The frame context thus stores
+ * values for all controls related to the algorithm, not limited to the
+ * controls specified in the corresponding request, but consolidated from all
+ * requests that have been queued so far.
+ *
+ * For controls that can be set manually or computed by an algorithm
+ * (depending on the algorithm operation mode), such as for instance the
+ * colour gains for the AWB algorithm, the control value will be stored in
+ * the frame context in the queueRequest() function only when operating in
+ * manual mode. When operating in auto mode, the values are computed by the
+ * algorithm in process(), stored in the active state, and copied to the
+ * frame context in prepare(), just before being stored in the ISP parameters
+ * buffer.
+ *
+ * The queueRequest() function can also store ancillary data in the frame
+ * context, such as flags to indicate if (and what) control values have
+ * changed compared to the previous request.
+ *
+ * - Status information computed by the algorithm for a frame. For instance,
+ * the colour temperature estimated by the AWB algorithm from ISP statistics
+ * calculated on a frame is stored in the frame context for that frame in
+ * the process() function.
*/
/**
* \var IPAFrameContext::agc
- * \brief Context for the Automatic Gain Control algorithm
+ * \brief Automatic Gain Control parameters for this frame
*
- * The exposure and gain determined are expected to be applied to the sensor
- * at the earliest opportunity.
+ * The exposure and gain are provided by the AGC algorithm, and are to be
+ * applied to the sensor in order to take effect for this frame.
*
* \var IPAFrameContext::agc.exposure
* \brief Exposure time expressed as a number of lines
@@ -115,7 +267,7 @@ namespace libcamera::ipa::rkisp1 {
/**
* \var IPAFrameContext::awb
- * \brief Context for the Automatic White Balance algorithm
+ * \brief Automatic White Balance parameters for this frame
*
* \struct IPAFrameContext::awb.gains
* \brief White balance gains
@@ -131,11 +283,59 @@ namespace libcamera::ipa::rkisp1 {
*
* \var IPAFrameContext::awb.temperatureK
* \brief Estimated color temperature
+ *
+ * \var IPAFrameContext::awb.autoEnabled
+ * \brief Whether the Auto White Balance algorithm is enabled
+ */
+
+/**
+ * \var IPAFrameContext::cproc
+ * \brief Color Processing parameters for this frame
+ *
+ * \var IPAFrameContext::cproc.brightness
+ * \brief Brightness level
+ *
+ * \var IPAFrameContext::cproc.contrast
+ * \brief Contrast level
+ *
+ * \var IPAFrameContext::cproc.saturation
+ * \brief Saturation level
+ *
+ * \var IPAFrameContext::cproc.update
+ * \brief Indicates if the color processing parameters have been updated
+ * compared to the previous frame
+ */
+
+/**
+ * \var IPAFrameContext::dpf
+ * \brief Denoise Pre-Filter parameters for this frame
+ *
+ * \var IPAFrameContext::dpf.denoise
+ * \brief Indicates if denoise is activated
+ *
+ * \var IPAFrameContext::dpf.update
+ * \brief Indicates if the denoise pre-filter parameters have been updated
+ * compared to the previous frame
+ */
+
+/**
+ * \var IPAFrameContext::filter
+ * \brief Filter parameters for this frame
+ *
+ * \var IPAFrameContext::filter.denoise
+ * \brief Denoising level
+ *
+ * \var IPAFrameContext::filter.sharpness
+ * \brief Sharpness level
+ *
+ * \var IPAFrameContext::filter.updateParams
+ * \brief Indicates if the filter parameters have been updated compared to the
+ * previous frame
*/
/**
* \var IPAFrameContext::sensor
- * \brief Effective sensor values
+ * \brief Sensor configuration that has been used for this frame
*
* \var IPAFrameContext::sensor.exposure
* \brief Exposure time expressed as a number of lines
@@ -145,12 +345,20 @@ namespace libcamera::ipa::rkisp1 {
*/
/**
- * \var IPAFrameContext::frameCount
- * \brief Counter of requests queued to the IPA module
+ * \struct IPAContext
+ * \brief Global IPA context data shared between all algorithms
+ *
+ * \var IPAContext::hw
+ * \brief RkISP1 version-specific hardware parameters
+ *
+ * \var IPAContext::configuration
+ * \brief The IPA session configuration, immutable during the session
+ *
+ * \var IPAContext::activeState
+ * \brief The IPA active state, storing the latest state for all algorithms
*
- * The counter is reset to 0 when the IPA module is configured, and is
- * incremented for each request being queued, after calling the
- * Algorithm::prepare() function of all algorithms.
+ * \var IPAContext::frameContexts
+ * \brief Ring buffer of per-frame contexts
*/
} /* namespace libcamera::ipa::rkisp1 */
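To make the manual/auto split documented above concrete, the following illustrative sketch (not part of the patch; signatures are simplified and the real code lives in the per-algorithm classes under algorithms/) shows how a control that can be set manually or computed automatically, such as the AWB colour gains, is expected to move between the active state and the frame context:

    /* Sketch only: assumes the IPAContext/IPAFrameContext layouts declared in ipa_context.h below. */
    void awbQueueRequest(IPAContext &ctx, IPAFrameContext &frameContext,
                         double red, double green, double blue, bool manualMode)
    {
            /* Manual gains from the request are consolidated in the active state... */
            if (manualMode) {
                    ctx.activeState.awb.gains.manual.red = red;
                    ctx.activeState.awb.gains.manual.green = green;
                    ctx.activeState.awb.gains.manual.blue = blue;
            }
            /* ...and the mode that will apply to this frame is recorded per frame. */
            frameContext.awb.autoEnabled = !manualMode;
    }

    void awbPrepare(IPAContext &ctx, IPAFrameContext &frameContext)
    {
            /* Just before the ISP parameters buffer is filled, copy whichever set applies. */
            if (frameContext.awb.autoEnabled) {
                    frameContext.awb.gains.red = ctx.activeState.awb.gains.automatic.red;
                    frameContext.awb.gains.green = ctx.activeState.awb.gains.automatic.green;
                    frameContext.awb.gains.blue = ctx.activeState.awb.gains.automatic.blue;
            } else {
                    frameContext.awb.gains.red = ctx.activeState.awb.gains.manual.red;
                    frameContext.awb.gains.green = ctx.activeState.awb.gains.manual.green;
                    frameContext.awb.gains.blue = ctx.activeState.awb.gains.manual.blue;
            }
    }
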
diff --git a/src/ipa/rkisp1/ipa_context.h b/src/ipa/rkisp1/ipa_context.h
index f387cace..bd02a7a2 100644
--- a/src/ipa/rkisp1/ipa_context.h
+++ b/src/ipa/rkisp1/ipa_context.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021-2022, Ideas On Board
*
- * ipa_context.h - RkISP1 IPA Context
+ * RkISP1 IPA Context
*
*/
@@ -12,38 +12,105 @@
#include <libcamera/base/utils.h>
+#include <libcamera/controls.h>
#include <libcamera/geometry.h>
+#include <libipa/fc_queue.h>
+
namespace libcamera {
namespace ipa::rkisp1 {
+struct IPAHwSettings {
+ unsigned int numAeCells;
+ unsigned int numHistogramBins;
+ unsigned int numHistogramWeights;
+ unsigned int numGammaOutSamples;
+};
+
struct IPASessionConfiguration {
struct {
- utils::Duration minShutterSpeed;
- utils::Duration maxShutterSpeed;
- double minAnalogueGain;
- double maxAnalogueGain;
struct rkisp1_cif_isp_window measureWindow;
} agc;
struct {
struct rkisp1_cif_isp_window measureWindow;
+ bool enabled;
} awb;
struct {
+ bool enabled;
+ } lsc;
+
+ struct {
+ utils::Duration minShutterSpeed;
+ utils::Duration maxShutterSpeed;
+ double minAnalogueGain;
+ double maxAnalogueGain;
+
+ int32_t defVBlank;
utils::Duration lineDuration;
+ Size size;
} sensor;
+ bool raw;
+};
+
+struct IPAActiveState {
struct {
- rkisp1_cif_isp_version revision;
- } hw;
+ struct {
+ uint32_t exposure;
+ double gain;
+ } manual;
+ struct {
+ uint32_t exposure;
+ double gain;
+ } automatic;
+
+ bool autoEnabled;
+ uint32_t constraintMode;
+ uint32_t exposureMode;
+ } agc;
+
+ struct {
+ struct {
+ struct {
+ double red;
+ double green;
+ double blue;
+ } manual;
+ struct {
+ double red;
+ double green;
+ double blue;
+ } automatic;
+ } gains;
+
+ unsigned int temperatureK;
+ bool autoEnabled;
+ } awb;
+
+ struct {
+ int8_t brightness;
+ uint8_t contrast;
+ uint8_t saturation;
+ } cproc;
+
+ struct {
+ bool denoise;
+ } dpf;
+
+ struct {
+ uint8_t denoise;
+ uint8_t sharpness;
+ } filter;
};
-struct IPAFrameContext {
+struct IPAFrameContext : public FrameContext {
struct {
uint32_t exposure;
double gain;
+ bool autoEnabled;
} agc;
struct {
@@ -53,20 +120,42 @@ struct IPAFrameContext {
double blue;
} gains;
- double temperatureK;
+ unsigned int temperatureK;
+ bool autoEnabled;
} awb;
struct {
+ int8_t brightness;
+ uint8_t contrast;
+ uint8_t saturation;
+ bool update;
+ } cproc;
+
+ struct {
+ bool denoise;
+ bool update;
+ } dpf;
+
+ struct {
+ uint8_t denoise;
+ uint8_t sharpness;
+ bool update;
+ } filter;
+
+ struct {
uint32_t exposure;
double gain;
} sensor;
-
- unsigned int frameCount;
};
struct IPAContext {
+ const IPAHwSettings *hw;
IPASessionConfiguration configuration;
- IPAFrameContext frameContext;
+ IPAActiveState activeState;
+
+ FCQueue<IPAFrameContext> frameContexts;
+
+ ControlInfoMap::Map ctrlMap;
};
} /* namespace ipa::rkisp1 */
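The frameContexts queue introduced above replaces the single per-IPA frameContext. The rkisp1.cpp hunks later in this diff use it roughly as follows (a condensed sketch of those hunks, not additional patch content):

    /* The queue is constructed with a fixed depth of kMaxFrameContexts in-flight frames. */
    IPARkISP1::IPARkISP1()
            : context_({ {}, {}, {}, { kMaxFrameContexts }, {} })
    {
    }

    /* queueRequest() allocates the context for a new frame... */
    IPAFrameContext &frameContext = context_.frameContexts.alloc(frame);

    /* ...fillParamsBuffer() and processStatsBuffer() look it up again by frame number... */
    IPAFrameContext &sameFrame = context_.frameContexts.get(frame);

    /* ...and configure()/stop() drop all contexts between streaming sessions. */
    context_.frameContexts.clear();
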
diff --git a/src/ipa/rkisp1/meson.build b/src/ipa/rkisp1/meson.build
index ccb84b27..e813da53 100644
--- a/src/ipa/rkisp1/meson.build
+++ b/src/ipa/rkisp1/meson.build
@@ -29,3 +29,5 @@ if ipa_sign_module
install : false,
build_by_default : true)
endif
+
+ipa_names += ipa_name
diff --git a/src/ipa/rkisp1/module.h b/src/ipa/rkisp1/module.h
index 89f83208..16c3e43e 100644
--- a/src/ipa/rkisp1/module.h
+++ b/src/ipa/rkisp1/module.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022, Ideas On Board
*
- * module.h - RkISP1 IPA Module
+ * RkISP1 IPA Module
*/
#pragma once
diff --git a/src/ipa/rkisp1/rkisp1.cpp b/src/ipa/rkisp1/rkisp1.cpp
index 21166b0f..6687c91e 100644
--- a/src/ipa/rkisp1/rkisp1.cpp
+++ b/src/ipa/rkisp1/rkisp1.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * rkisp1.cpp - RkISP1 Image Processing Algorithms
+ * RkISP1 Image Processing Algorithms
*/
#include <algorithm>
@@ -24,13 +24,11 @@
#include <libcamera/ipa/rkisp1_ipa_interface.h>
#include <libcamera/request.h>
+#include "libcamera/internal/formats.h"
#include "libcamera/internal/mapped_framebuffer.h"
#include "libcamera/internal/yaml_parser.h"
-#include "algorithms/agc.h"
#include "algorithms/algorithm.h"
-#include "algorithms/awb.h"
-#include "algorithms/blc.h"
#include "libipa/camera_sensor_helper.h"
#include "ipa_context.h"
@@ -43,16 +41,24 @@ using namespace std::literals::chrono_literals;
namespace ipa::rkisp1 {
+/* Maximum number of frame contexts to be held */
+static constexpr uint32_t kMaxFrameContexts = 16;
+
class IPARkISP1 : public IPARkISP1Interface, public Module
{
public:
- int init(const IPASettings &settings, unsigned int hwRevision) override;
+ IPARkISP1();
+
+ int init(const IPASettings &settings, unsigned int hwRevision,
+ const IPACameraSensorInfo &sensorInfo,
+ const ControlInfoMap &sensorControls,
+ ControlInfoMap *ipaControls) override;
int start() override;
- void stop() override {}
+ void stop() override;
- int configure(const IPACameraSensorInfo &info,
+ int configure(const IPAConfigInfo &ipaConfig,
const std::map<uint32_t, IPAStream> &streamConfig,
- const std::map<uint32_t, ControlInfoMap> &entityControls) override;
+ ControlInfoMap *ipaControls) override;
void mapBuffers(const std::vector<IPABuffer> &buffers) override;
void unmapBuffers(const std::vector<unsigned int> &ids) override;
@@ -65,22 +71,15 @@ protected:
std::string logPrefix() const override;
private:
+ void updateControls(const IPACameraSensorInfo &sensorInfo,
+ const ControlInfoMap &sensorControls,
+ ControlInfoMap *ipaControls);
void setControls(unsigned int frame);
- void prepareMetadata(unsigned int frame, unsigned int aeState);
std::map<unsigned int, FrameBuffer> buffers_;
std::map<unsigned int, MappedFrameBuffer> mappedBuffers_;
- ControlInfoMap ctrls_;
-
- /* Camera sensor controls. */
- bool autoExposure_;
-
- /* revision-specific data */
- rkisp1_cif_isp_version hwRevision_;
- unsigned int hwHistBinNMax_;
- unsigned int hwGammaOutMaxSamples_;
- unsigned int hwHistogramWeightGridsSize_;
+ ControlInfoMap sensorControls_;
/* Interface to the Camera Helper */
std::unique_ptr<CameraSensorHelper> camHelper_;
@@ -89,24 +88,59 @@ private:
struct IPAContext context_;
};
+namespace {
+
+const IPAHwSettings ipaHwSettingsV10{
+ RKISP1_CIF_ISP_AE_MEAN_MAX_V10,
+ RKISP1_CIF_ISP_HIST_BIN_N_MAX_V10,
+ RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V10,
+ RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10,
+};
+
+const IPAHwSettings ipaHwSettingsV12{
+ RKISP1_CIF_ISP_AE_MEAN_MAX_V12,
+ RKISP1_CIF_ISP_HIST_BIN_N_MAX_V12,
+ RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V12,
+ RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V12,
+};
+
+/* List of controls handled by the RkISP1 IPA */
+const ControlInfoMap::Map rkisp1Controls{
+ { &controls::AeEnable, ControlInfo(false, true) },
+ { &controls::AwbEnable, ControlInfo(false, true) },
+ { &controls::ColourGains, ControlInfo(0.0f, 3.996f, 1.0f) },
+ { &controls::Brightness, ControlInfo(-1.0f, 0.993f, 0.0f) },
+ { &controls::Contrast, ControlInfo(0.0f, 1.993f, 1.0f) },
+ { &controls::Saturation, ControlInfo(0.0f, 1.993f, 1.0f) },
+ { &controls::Sharpness, ControlInfo(0.0f, 10.0f, 1.0f) },
+ { &controls::draft::NoiseReductionMode, ControlInfo(controls::draft::NoiseReductionModeValues) },
+};
+
+} /* namespace */
+
+IPARkISP1::IPARkISP1()
+ : context_({ {}, {}, {}, { kMaxFrameContexts }, {} })
+{
+}
+
std::string IPARkISP1::logPrefix() const
{
return "rkisp1";
}
-int IPARkISP1::init(const IPASettings &settings, unsigned int hwRevision)
+int IPARkISP1::init(const IPASettings &settings, unsigned int hwRevision,
+ const IPACameraSensorInfo &sensorInfo,
+ const ControlInfoMap &sensorControls,
+ ControlInfoMap *ipaControls)
{
/* \todo Add support for other revisions */
switch (hwRevision) {
case RKISP1_V10:
- hwHistBinNMax_ = RKISP1_CIF_ISP_HIST_BIN_N_MAX_V10;
- hwGammaOutMaxSamples_ = RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10;
- hwHistogramWeightGridsSize_ = RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V10;
+ case RKISP1_V_IMX8MP:
+ context_.hw = &ipaHwSettingsV10;
break;
case RKISP1_V12:
- hwHistBinNMax_ = RKISP1_CIF_ISP_HIST_BIN_N_MAX_V12;
- hwGammaOutMaxSamples_ = RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V12;
- hwHistogramWeightGridsSize_ = RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V12;
+ context_.hw = &ipaHwSettingsV12;
break;
default:
LOG(IPARkISP1, Error)
@@ -117,10 +151,7 @@ int IPARkISP1::init(const IPASettings &settings, unsigned int hwRevision)
LOG(IPARkISP1, Debug) << "Hardware revision is " << hwRevision;
- /* Cache the value to set it in configure. */
- hwRevision_ = static_cast<rkisp1_cif_isp_version>(hwRevision);
-
- camHelper_ = CameraSensorHelperFactory::create(settings.sensorModel);
+ camHelper_ = CameraSensorHelperFactoryBase::create(settings.sensorModel);
if (!camHelper_) {
LOG(IPARkISP1, Error)
<< "Failed to create camera sensor helper for "
@@ -128,8 +159,11 @@ int IPARkISP1::init(const IPASettings &settings, unsigned int hwRevision)
return -ENODEV;
}
+ context_.configuration.sensor.lineDuration = sensorInfo.minLineLength
+ * 1.0s / sensorInfo.pixelRate;
+
/* Load the tuning data file. */
- File file(settings.configurationFile.c_str());
+ File file(settings.configurationFile);
if (!file.open(File::OpenModeFlag::ReadOnly)) {
int ret = file.error();
LOG(IPARkISP1, Error)
@@ -155,7 +189,14 @@ int IPARkISP1::init(const IPASettings &settings, unsigned int hwRevision)
return -EINVAL;
}
- return createAlgorithms(context_, (*data)["algorithms"]);
+ int ret = createAlgorithms(context_, (*data)["algorithms"]);
+ if (ret)
+ return ret;
+
+ /* Initialize controls. */
+ updateControls(sensorInfo, sensorControls, ipaControls);
+
+ return 0;
}
int IPARkISP1::start()
@@ -165,52 +206,42 @@ int IPARkISP1::start()
return 0;
}
-/**
- * \todo The RkISP1 pipeline currently provides an empty IPACameraSensorInfo
- * if the connected sensor does not provide enough information to properly
- * assemble one. Make sure the reported sensor information are relevant
- * before accessing them.
- */
-int IPARkISP1::configure([[maybe_unused]] const IPACameraSensorInfo &info,
- [[maybe_unused]] const std::map<uint32_t, IPAStream> &streamConfig,
- const std::map<uint32_t, ControlInfoMap> &entityControls)
+void IPARkISP1::stop()
{
- if (entityControls.empty())
- return -EINVAL;
-
- ctrls_ = entityControls.at(0);
-
- const auto itExp = ctrls_.find(V4L2_CID_EXPOSURE);
- if (itExp == ctrls_.end()) {
- LOG(IPARkISP1, Error) << "Can't find exposure control";
- return -EINVAL;
- }
-
- const auto itGain = ctrls_.find(V4L2_CID_ANALOGUE_GAIN);
- if (itGain == ctrls_.end()) {
- LOG(IPARkISP1, Error) << "Can't find gain control";
- return -EINVAL;
- }
+ context_.frameContexts.clear();
+}
- autoExposure_ = true;
+int IPARkISP1::configure(const IPAConfigInfo &ipaConfig,
+ const std::map<uint32_t, IPAStream> &streamConfig,
+ ControlInfoMap *ipaControls)
+{
+ sensorControls_ = ipaConfig.sensorControls;
+ const auto itExp = sensorControls_.find(V4L2_CID_EXPOSURE);
int32_t minExposure = itExp->second.min().get<int32_t>();
int32_t maxExposure = itExp->second.max().get<int32_t>();
+ const auto itGain = sensorControls_.find(V4L2_CID_ANALOGUE_GAIN);
int32_t minGain = itGain->second.min().get<int32_t>();
int32_t maxGain = itGain->second.max().get<int32_t>();
- LOG(IPARkISP1, Info)
- << "Exposure: " << minExposure << "-" << maxExposure
- << " Gain: " << minGain << "-" << maxGain;
+ LOG(IPARkISP1, Debug)
+ << "Exposure: [" << minExposure << ", " << maxExposure
+ << "], gain: [" << minGain << ", " << maxGain << "]";
- /* Clean context at configuration */
- context_ = {};
+ /* Clear the IPA context before the streaming session. */
+ context_.configuration = {};
+ context_.activeState = {};
+ context_.frameContexts.clear();
- /* Set the hardware revision for the algorithms. */
- context_.configuration.hw.revision = hwRevision_;
+ const IPACameraSensorInfo &info = ipaConfig.sensorInfo;
+ const ControlInfo vBlank = sensorControls_.find(V4L2_CID_VBLANK)->second;
+ context_.configuration.sensor.defVBlank = vBlank.def().get<int32_t>();
+ context_.configuration.sensor.size = info.outputSize;
+ context_.configuration.sensor.lineDuration = info.minLineLength * 1.0s / info.pixelRate;
- context_.configuration.sensor.lineDuration = info.lineLength * 1.0s / info.pixelRate;
+ /* Update the camera controls using the new sensor settings. */
+ updateControls(info, sensorControls_, ipaControls);
/*
* When the AGC computes the new exposure values for a frame, it needs
@@ -219,14 +250,28 @@ int IPARkISP1::configure([[maybe_unused]] const IPACameraSensorInfo &info,
*
* \todo take VBLANK into account for maximum shutter speed
*/
- context_.configuration.agc.minShutterSpeed = minExposure * context_.configuration.sensor.lineDuration;
- context_.configuration.agc.maxShutterSpeed = maxExposure * context_.configuration.sensor.lineDuration;
- context_.configuration.agc.minAnalogueGain = camHelper_->gain(minGain);
- context_.configuration.agc.maxAnalogueGain = camHelper_->gain(maxGain);
-
- context_.frameContext.frameCount = 0;
+ context_.configuration.sensor.minShutterSpeed =
+ minExposure * context_.configuration.sensor.lineDuration;
+ context_.configuration.sensor.maxShutterSpeed =
+ maxExposure * context_.configuration.sensor.lineDuration;
+ context_.configuration.sensor.minAnalogueGain = camHelper_->gain(minGain);
+ context_.configuration.sensor.maxAnalogueGain = camHelper_->gain(maxGain);
+
+ context_.configuration.raw = std::any_of(streamConfig.begin(), streamConfig.end(),
+ [](auto &cfg) -> bool {
+ PixelFormat pixelFormat{ cfg.second.pixelFormat };
+ const PixelFormatInfo &format = PixelFormatInfo::info(pixelFormat);
+ return format.colourEncoding == PixelFormatInfo::ColourEncodingRAW;
+ });
+
+ for (auto const &a : algorithms()) {
+ Algorithm *algo = static_cast<Algorithm *>(a.get());
+
+ /* Disable algorithms that don't support raw formats. */
+ algo->disabled_ = context_.configuration.raw && !algo->supportsRaw_;
+ if (algo->disabled_)
+ continue;
- for (auto const &algo : algorithms()) {
int ret = algo->configure(context_, info);
if (ret)
return ret;
@@ -265,14 +310,22 @@ void IPARkISP1::unmapBuffers(const std::vector<unsigned int> &ids)
}
}
-void IPARkISP1::queueRequest([[maybe_unused]] const uint32_t frame,
- [[maybe_unused]] const ControlList &controls)
+void IPARkISP1::queueRequest(const uint32_t frame, const ControlList &controls)
{
- /* \todo Start processing for 'frame' based on 'controls'. */
+ IPAFrameContext &frameContext = context_.frameContexts.alloc(frame);
+
+ for (auto const &a : algorithms()) {
+ Algorithm *algo = static_cast<Algorithm *>(a.get());
+ if (algo->disabled_)
+ continue;
+ algo->queueRequest(context_, frame, frameContext, controls);
+ }
}
void IPARkISP1::fillParamsBuffer(const uint32_t frame, const uint32_t bufferId)
{
+ IPAFrameContext &frameContext = context_.frameContexts.get(frame);
+
rkisp1_params_cfg *params =
reinterpret_cast<rkisp1_params_cfg *>(
mappedBuffers_.at(bufferId).planes()[0].data());
@@ -281,54 +334,119 @@ void IPARkISP1::fillParamsBuffer(const uint32_t frame, const uint32_t bufferId)
memset(params, 0, sizeof(*params));
for (auto const &algo : algorithms())
- algo->prepare(context_, params);
+ algo->prepare(context_, frame, frameContext, params);
paramsBufferReady.emit(frame);
- context_.frameContext.frameCount++;
}
void IPARkISP1::processStatsBuffer(const uint32_t frame, const uint32_t bufferId,
const ControlList &sensorControls)
{
- const rkisp1_stat_buffer *stats =
- reinterpret_cast<rkisp1_stat_buffer *>(
+ IPAFrameContext &frameContext = context_.frameContexts.get(frame);
+
+ /*
+ * In raw capture mode, the ISP is bypassed and no statistics buffer is
+ * provided.
+ */
+ const rkisp1_stat_buffer *stats = nullptr;
+ if (!context_.configuration.raw)
+ stats = reinterpret_cast<rkisp1_stat_buffer *>(
mappedBuffers_.at(bufferId).planes()[0].data());
- context_.frameContext.sensor.exposure =
+ frameContext.sensor.exposure =
sensorControls.get(V4L2_CID_EXPOSURE).get<int32_t>();
- context_.frameContext.sensor.gain =
+ frameContext.sensor.gain =
camHelper_->gain(sensorControls.get(V4L2_CID_ANALOGUE_GAIN).get<int32_t>());
- unsigned int aeState = 0;
+ ControlList metadata(controls::controls);
- for (auto const &algo : algorithms())
- algo->process(context_, nullptr, stats);
+ for (auto const &a : algorithms()) {
+ Algorithm *algo = static_cast<Algorithm *>(a.get());
+ if (algo->disabled_)
+ continue;
+ algo->process(context_, frame, frameContext, stats, metadata);
+ }
setControls(frame);
- prepareMetadata(frame, aeState);
+ metadataReady.emit(frame, metadata);
}
-void IPARkISP1::setControls(unsigned int frame)
+void IPARkISP1::updateControls(const IPACameraSensorInfo &sensorInfo,
+ const ControlInfoMap &sensorControls,
+ ControlInfoMap *ipaControls)
{
- uint32_t exposure = context_.frameContext.agc.exposure;
- uint32_t gain = camHelper_->gainCode(context_.frameContext.agc.gain);
+ ControlInfoMap::Map ctrlMap = rkisp1Controls;
- ControlList ctrls(ctrls_);
- ctrls.set(V4L2_CID_EXPOSURE, static_cast<int32_t>(exposure));
- ctrls.set(V4L2_CID_ANALOGUE_GAIN, static_cast<int32_t>(gain));
+ /*
+ * Compute exposure time limits from the V4L2_CID_EXPOSURE control
+ * limits and the line duration.
+ */
+ double lineDuration = context_.configuration.sensor.lineDuration.get<std::micro>();
+ const ControlInfo &v4l2Exposure = sensorControls.find(V4L2_CID_EXPOSURE)->second;
+ int32_t minExposure = v4l2Exposure.min().get<int32_t>() * lineDuration;
+ int32_t maxExposure = v4l2Exposure.max().get<int32_t>() * lineDuration;
+ int32_t defExposure = v4l2Exposure.def().get<int32_t>() * lineDuration;
+ ctrlMap.emplace(std::piecewise_construct,
+ std::forward_as_tuple(&controls::ExposureTime),
+ std::forward_as_tuple(minExposure, maxExposure, defExposure));
+
+ /* Compute the analogue gain limits. */
+ const ControlInfo &v4l2Gain = sensorControls.find(V4L2_CID_ANALOGUE_GAIN)->second;
+ float minGain = camHelper_->gain(v4l2Gain.min().get<int32_t>());
+ float maxGain = camHelper_->gain(v4l2Gain.max().get<int32_t>());
+ float defGain = camHelper_->gain(v4l2Gain.def().get<int32_t>());
+ ctrlMap.emplace(std::piecewise_construct,
+ std::forward_as_tuple(&controls::AnalogueGain),
+ std::forward_as_tuple(minGain, maxGain, defGain));
- setSensorControls.emit(frame, ctrls);
+ /*
+ * Compute the frame duration limits.
+ *
+ * The frame length is computed assuming a fixed line length combined
+ * with the vertical frame sizes.
+ */
+ const ControlInfo &v4l2HBlank = sensorControls.find(V4L2_CID_HBLANK)->second;
+ uint32_t hblank = v4l2HBlank.def().get<int32_t>();
+ uint32_t lineLength = sensorInfo.outputSize.width + hblank;
+
+ const ControlInfo &v4l2VBlank = sensorControls.find(V4L2_CID_VBLANK)->second;
+ std::array<uint32_t, 3> frameHeights{
+ v4l2VBlank.min().get<int32_t>() + sensorInfo.outputSize.height,
+ v4l2VBlank.max().get<int32_t>() + sensorInfo.outputSize.height,
+ v4l2VBlank.def().get<int32_t>() + sensorInfo.outputSize.height,
+ };
+
+ std::array<int64_t, 3> frameDurations;
+ for (unsigned int i = 0; i < frameHeights.size(); ++i) {
+ uint64_t frameSize = lineLength * frameHeights[i];
+ frameDurations[i] = frameSize / (sensorInfo.pixelRate / 1000000U);
+ }
+
+ ctrlMap[&controls::FrameDurationLimits] = ControlInfo(frameDurations[0],
+ frameDurations[1],
+ frameDurations[2]);
+
+ ctrlMap.merge(context_.ctrlMap);
+ *ipaControls = ControlInfoMap(std::move(ctrlMap), controls::controls);
}
-void IPARkISP1::prepareMetadata(unsigned int frame, unsigned int aeState)
+void IPARkISP1::setControls(unsigned int frame)
{
- ControlList ctrls(controls::controls);
+ /*
+ * \todo The frame number is most likely wrong here, we need to take
+ * internal sensor delays and other timing parameters into account.
+ */
+
+ IPAFrameContext &frameContext = context_.frameContexts.get(frame);
+ uint32_t exposure = frameContext.agc.exposure;
+ uint32_t gain = camHelper_->gainCode(frameContext.agc.gain);
- if (aeState)
- ctrls.set(controls::AeLocked, aeState == 2);
+ ControlList ctrls(sensorControls_);
+ ctrls.set(V4L2_CID_EXPOSURE, static_cast<int32_t>(exposure));
+ ctrls.set(V4L2_CID_ANALOGUE_GAIN, static_cast<int32_t>(gain));
- metadataReady.emit(frame, ctrls);
+ setSensorControls.emit(frame, ctrls);
}
} /* namespace ipa::rkisp1 */
@@ -341,7 +459,7 @@ extern "C" {
const struct IPAModuleInfo ipaModuleInfo = {
IPA_MODULE_API_VERSION,
1,
- "PipelineHandlerRkISP1",
+ "rkisp1",
"rkisp1",
};
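
For illustration, a standalone sketch of the frame-duration arithmetic used by updateControls() above, with made-up mode numbers (1920x1080 output, 200-pixel HBLANK, 45-line minimum VBLANK, 150 Mpixel/s pixel rate); none of these values come from a real sensor mode.

#include <cstdint>
#include <iostream>

int main()
{
	/* Assumed example values, not taken from any real sensor mode. */
	const uint32_t width = 1920;          /* sensorInfo.outputSize.width */
	const uint32_t height = 1080;         /* sensorInfo.outputSize.height */
	const uint32_t hblank = 200;          /* V4L2_CID_HBLANK default */
	const uint32_t vblankMin = 45;        /* V4L2_CID_VBLANK minimum */
	const uint64_t pixelRate = 150000000; /* pixels per second */

	const uint32_t lineLength = width + hblank;      /* 2120 pixels per line */
	const uint32_t frameHeight = height + vblankMin; /* 1125 lines per frame */
	const uint64_t frameSize = uint64_t(lineLength) * frameHeight;

	/* Same formula as updateControls(): microseconds per frame. */
	const uint64_t frameDurationUs = frameSize / (pixelRate / 1000000U);
	std::cout << frameDurationUs << " us (about "
		  << 1000000.0 / frameDurationUs << " fps)\n"; /* 15900 us, ~62.9 fps */
	return 0;
}
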
diff --git a/src/ipa/raspberrypi/README.md b/src/ipa/rpi/README.md
index 94a8ccc8..94a8ccc8 100644
--- a/src/ipa/raspberrypi/README.md
+++ b/src/ipa/rpi/README.md
diff --git a/src/ipa/rpi/cam_helper/cam_helper.cpp b/src/ipa/rpi/cam_helper/cam_helper.cpp
new file mode 100644
index 00000000..ee5d011f
--- /dev/null
+++ b/src/ipa/rpi/cam_helper/cam_helper.cpp
@@ -0,0 +1,265 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * helper information for different sensors
+ */
+
+#include <linux/videodev2.h>
+
+#include <limits>
+#include <map>
+#include <string.h>
+
+#include "libcamera/internal/v4l2_videodevice.h"
+
+#include "cam_helper.h"
+#include "md_parser.h"
+
+using namespace RPiController;
+using namespace libcamera;
+using libcamera::utils::Duration;
+using namespace std::literals::chrono_literals;
+
+namespace libcamera {
+LOG_DECLARE_CATEGORY(IPARPI)
+}
+
+namespace {
+
+std::map<std::string, CamHelperCreateFunc> &camHelpers()
+{
+ static std::map<std::string, CamHelperCreateFunc> helpers;
+ return helpers;
+}
+
+} /* namespace */
+
+CamHelper *CamHelper::create(std::string const &camName)
+{
+ /*
+ * CamHelpers get registered by static RegisterCamHelper
+ * initialisers.
+ */
+ for (auto &p : camHelpers()) {
+ if (camName.find(p.first) != std::string::npos)
+ return p.second();
+ }
+
+ return nullptr;
+}
+
+CamHelper::CamHelper(std::unique_ptr<MdParser> parser, unsigned int frameIntegrationDiff)
+ : parser_(std::move(parser)), frameIntegrationDiff_(frameIntegrationDiff)
+{
+}
+
+CamHelper::~CamHelper()
+{
+}
+
+void CamHelper::prepare(Span<const uint8_t> buffer,
+ Metadata &metadata)
+{
+ parseEmbeddedData(buffer, metadata);
+}
+
+void CamHelper::process([[maybe_unused]] StatisticsPtr &stats,
+ [[maybe_unused]] Metadata &metadata)
+{
+}
+
+uint32_t CamHelper::exposureLines(const Duration exposure, const Duration lineLength) const
+{
+ return exposure / lineLength;
+}
+
+Duration CamHelper::exposure(uint32_t exposureLines, const Duration lineLength) const
+{
+ return exposureLines * lineLength;
+}
+
+std::pair<uint32_t, uint32_t> CamHelper::getBlanking(Duration &exposure,
+ Duration minFrameDuration,
+ Duration maxFrameDuration) const
+{
+ uint32_t frameLengthMin, frameLengthMax, vblank, hblank;
+ Duration lineLength = mode_.minLineLength;
+
+ /*
+ * minFrameDuration and maxFrameDuration are clamped by the caller
+ * based on the limits for the active sensor mode.
+ *
+ * frameLengthMax is calculated using the smallest line length, as we do
+ * not want to extend the line length unless absolutely necessary.
+ */
+ frameLengthMin = minFrameDuration / mode_.minLineLength;
+ frameLengthMax = maxFrameDuration / mode_.minLineLength;
+
+ /*
+ * Watch out for (exposureLines + frameIntegrationDiff_) overflowing a
+ * uint32_t in the std::clamp() below when the exposure time is
+ * extremely (extremely!) long - as happens when the IPA calculates the
+ * maximum possible exposure time.
+ */
+ uint32_t exposureLines = std::min(CamHelper::exposureLines(exposure, lineLength),
+ std::numeric_limits<uint32_t>::max() - frameIntegrationDiff_);
+ uint32_t frameLengthLines = std::clamp(exposureLines + frameIntegrationDiff_,
+ frameLengthMin, frameLengthMax);
+
+ /*
+ * If the frame length in lines exceeds the maximum allowed, see if we
+ * can extend the line length to accommodate the requested frame length.
+ */
+ if (frameLengthLines > mode_.maxFrameLength) {
+ Duration lineLengthAdjusted = lineLength * frameLengthLines / mode_.maxFrameLength;
+ lineLength = std::min(mode_.maxLineLength, lineLengthAdjusted);
+ frameLengthLines = mode_.maxFrameLength;
+ }
+
+ hblank = lineLengthToHblank(lineLength);
+ vblank = frameLengthLines - mode_.height;
+
+ /*
+ * Limit the exposure to the maximum frame duration requested, and
+ * re-calculate if it has been clipped.
+ */
+ exposureLines = std::min(frameLengthLines - frameIntegrationDiff_,
+ CamHelper::exposureLines(exposure, lineLength));
+ exposure = CamHelper::exposure(exposureLines, lineLength);
+
+ return { vblank, hblank };
+}
+
+Duration CamHelper::hblankToLineLength(uint32_t hblank) const
+{
+ return (mode_.width + hblank) * (1.0s / mode_.pixelRate);
+}
+
+uint32_t CamHelper::lineLengthToHblank(const Duration &lineLength) const
+{
+ return (lineLength * mode_.pixelRate / 1.0s) - mode_.width;
+}
+
+Duration CamHelper::lineLengthPckToDuration(uint32_t lineLengthPck) const
+{
+ return lineLengthPck * (1.0s / mode_.pixelRate);
+}
+
+void CamHelper::setCameraMode(const CameraMode &mode)
+{
+ mode_ = mode;
+ if (parser_) {
+ parser_->reset();
+ parser_->setBitsPerPixel(mode.bitdepth);
+ parser_->setLineLengthBytes(0); /* We use SetBufferSize. */
+ }
+}
+
+void CamHelper::getDelays(int &exposureDelay, int &gainDelay,
+ int &vblankDelay, int &hblankDelay) const
+{
+ /*
+ * These values are correct for many sensors. Other sensors will
+ * need to override this function.
+ */
+ exposureDelay = 2;
+ gainDelay = 1;
+ vblankDelay = 2;
+ hblankDelay = 2;
+}
+
+bool CamHelper::sensorEmbeddedDataPresent() const
+{
+ return false;
+}
+
+double CamHelper::getModeSensitivity([[maybe_unused]] const CameraMode &mode) const
+{
+ /*
+ * Most sensors have the same sensitivity in every mode, but this
+ * function can be overridden for those that do not. Note that it is
+ * called before mode_ is set, so it must return the sensitivity
+ * of the mode that is passed in.
+ */
+ return 1.0;
+}
+
+unsigned int CamHelper::hideFramesStartup() const
+{
+ /*
+ * The number of frames, when a camera first starts, that shouldn't be
+ * displayed as they are invalid in some way.
+ */
+ return 0;
+}
+
+unsigned int CamHelper::hideFramesModeSwitch() const
+{
+ /* After a mode switch, many sensors return valid frames immediately. */
+ return 0;
+}
+
+unsigned int CamHelper::mistrustFramesStartup() const
+{
+ /* Many sensors return a single bad frame on start-up. */
+ return 1;
+}
+
+unsigned int CamHelper::mistrustFramesModeSwitch() const
+{
+ /* Many sensors return valid metadata immediately. */
+ return 0;
+}
+
+void CamHelper::parseEmbeddedData(Span<const uint8_t> buffer,
+ Metadata &metadata)
+{
+ MdParser::RegisterMap registers;
+ Metadata parsedMetadata;
+
+ if (buffer.empty())
+ return;
+
+ if (parser_->parse(buffer, registers) != MdParser::Status::OK) {
+ LOG(IPARPI, Error) << "Embedded data buffer parsing failed";
+ return;
+ }
+
+ populateMetadata(registers, parsedMetadata);
+ metadata.merge(parsedMetadata);
+
+ /*
+ * Overwrite the exposure/gain, line/frame length and sensor temperature values
+ * in the existing DeviceStatus with values from the parsed embedded buffer.
+ * Fetch it first in case any other fields were set meaningfully.
+ */
+ DeviceStatus deviceStatus, parsedDeviceStatus;
+ if (metadata.get("device.status", deviceStatus) ||
+ parsedMetadata.get("device.status", parsedDeviceStatus)) {
+ LOG(IPARPI, Error) << "DeviceStatus not found";
+ return;
+ }
+
+ deviceStatus.shutterSpeed = parsedDeviceStatus.shutterSpeed;
+ deviceStatus.analogueGain = parsedDeviceStatus.analogueGain;
+ deviceStatus.frameLength = parsedDeviceStatus.frameLength;
+ deviceStatus.lineLength = parsedDeviceStatus.lineLength;
+ if (parsedDeviceStatus.sensorTemperature)
+ deviceStatus.sensorTemperature = parsedDeviceStatus.sensorTemperature;
+
+ LOG(IPARPI, Debug) << "Metadata updated - " << deviceStatus;
+
+ metadata.set("device.status", deviceStatus);
+}
+
+void CamHelper::populateMetadata([[maybe_unused]] const MdParser::RegisterMap &registers,
+ [[maybe_unused]] Metadata &metadata) const
+{
+}
+
+RegisterCamHelper::RegisterCamHelper(char const *camName,
+ CamHelperCreateFunc createFunc)
+{
+ camHelpers()[std::string(camName)] = createFunc;
+}
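
A minimal paraphrase of the exposure clamping in getBlanking() above, using plain microsecond doubles instead of libcamera Durations and an assumed mode (1080 lines, 20 us minimum line length, frameIntegrationDiff of 4); it only shows how an over-long exposure request is clipped by the maximum frame duration.

#include <algorithm>
#include <cstdint>
#include <iostream>

int main()
{
	/* Assumed mode parameters, not a real sensor mode. */
	const double lineLengthUs = 20.0;       /* mode_.minLineLength */
	const uint32_t height = 1080;           /* mode_.height */
	const uint32_t integrationDiff = 4;     /* frameIntegrationDiff_ */

	double exposureUs = 50000.0;                /* requested exposure, 50 ms */
	const double maxFrameDurationUs = 33333.0;  /* ~30 fps cap */

	const uint32_t frameLengthMax = maxFrameDurationUs / lineLengthUs; /* 1666 lines */
	uint32_t exposureLines = exposureUs / lineLengthUs;                /* 2500 lines */
	uint32_t frameLengthLines = std::min(exposureLines + integrationDiff, frameLengthMax);

	/* Exposure is clipped to what fits in the frame, as in getBlanking(). */
	exposureLines = std::min(frameLengthLines - integrationDiff, exposureLines);
	exposureUs = exposureLines * lineLengthUs;

	const uint32_t vblank = frameLengthLines - height;
	std::cout << "vblank " << vblank << " lines, exposure "
		  << exposureUs << " us\n"; /* 586 lines, 33240 us */
	return 0;
}
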
diff --git a/src/ipa/rpi/cam_helper/cam_helper.h b/src/ipa/rpi/cam_helper/cam_helper.h
new file mode 100644
index 00000000..4a4ab5e6
--- /dev/null
+++ b/src/ipa/rpi/cam_helper/cam_helper.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * helper class providing camera information
+ */
+#pragma once
+
+#include <memory>
+#include <string>
+#include <utility>
+
+#include <libcamera/base/span.h>
+#include <libcamera/base/utils.h>
+
+#include "controller/camera_mode.h"
+#include "controller/controller.h"
+#include "controller/metadata.h"
+#include "md_parser.h"
+
+#include "libcamera/internal/v4l2_videodevice.h"
+
+namespace RPiController {
+
+/*
+ * The CamHelper class provides a number of facilities that anyone trying
+ * to drive a camera will need, but which are not provided by the
+ * standard driver framework. Specifically, it provides:
+ *
+ * A "CameraMode" structure to describe extra information about the chosen
+ * mode of the driver. For example, how it is cropped from the full sensor
+ * area, how it is scaled, whether pixels are averaged compared to the full
+ * resolution.
+ *
+ * The ability to convert between number of lines of exposure and actual
+ * exposure time, and to convert between the sensor's gain codes and actual
+ * gains.
+ *
+ * A function to return the number of frames of delay between updating
+ * exposure, analogue gain and vblanking, and those changes taking effect. For
+ * many sensors these take the values 2, 1 and 2 respectively, but sensors that
+ * are different will need to override the default function provided.
+ *
+ * A function to query if the sensor outputs embedded data that can be parsed.
+ *
+ * A function to return the sensitivity of a given camera mode.
+ *
+ * A parser to parse the embedded data buffers provided by some sensors (for
+ * example, the imx219 does; the ov5647 doesn't). This allows us to know for
+ * sure the exposure and gain of the frame we're looking at. CamHelper
+ * provides functions for converting analogue gains to and from the sensor's
+ * native gain codes.
+ *
+ * Finally, a set of functions that determine how to handle the vagaries of
+ * different camera modules on start-up or when switching modes. Some
+ * modules may produce one or more frames that are not yet correctly exposed,
+ * or where the metadata may be suspect. We have the following functions:
+ * hideFramesStartup(): Tell the pipeline handler not to return this many
+ * frames at start-up. This can also be used to hide initial frames
+ * while the AGC and other algorithms are sorting themselves out.
+ * hideFramesModeSwitch(): Tell the pipeline handler not to return this
+ * many frames after a mode switch (other than start-up). Some sensors
+ * may produce invalid frames after a mode switch; others may not.
+ * mistrustFramesStartup(): At start-up a sensor may return frames for
+ * which we should not run any control algorithms (for example, metadata
+ * may be invalid).
+ * mistrustFramesModeSwitch(): The number of frames, after a mode switch
+ * (other than start-up), for which control algorithms should not run
+ * (for example, metadata may be unreliable).
+ */
+
+class CamHelper
+{
+public:
+ static CamHelper *create(std::string const &camName);
+ CamHelper(std::unique_ptr<MdParser> parser, unsigned int frameIntegrationDiff);
+ virtual ~CamHelper();
+ void setCameraMode(const CameraMode &mode);
+ virtual void prepare(libcamera::Span<const uint8_t> buffer,
+ Metadata &metadata);
+ virtual void process(StatisticsPtr &stats, Metadata &metadata);
+ virtual uint32_t exposureLines(const libcamera::utils::Duration exposure,
+ const libcamera::utils::Duration lineLength) const;
+ virtual libcamera::utils::Duration exposure(uint32_t exposureLines,
+ const libcamera::utils::Duration lineLength) const;
+ virtual std::pair<uint32_t, uint32_t> getBlanking(libcamera::utils::Duration &exposure,
+ libcamera::utils::Duration minFrameDuration,
+ libcamera::utils::Duration maxFrameDuration) const;
+ libcamera::utils::Duration hblankToLineLength(uint32_t hblank) const;
+ uint32_t lineLengthToHblank(const libcamera::utils::Duration &duration) const;
+ libcamera::utils::Duration lineLengthPckToDuration(uint32_t lineLengthPck) const;
+ virtual uint32_t gainCode(double gain) const = 0;
+ virtual double gain(uint32_t gainCode) const = 0;
+ virtual void getDelays(int &exposureDelay, int &gainDelay,
+ int &vblankDelay, int &hblankDelay) const;
+ virtual bool sensorEmbeddedDataPresent() const;
+ virtual double getModeSensitivity(const CameraMode &mode) const;
+ virtual unsigned int hideFramesStartup() const;
+ virtual unsigned int hideFramesModeSwitch() const;
+ virtual unsigned int mistrustFramesStartup() const;
+ virtual unsigned int mistrustFramesModeSwitch() const;
+
+protected:
+ void parseEmbeddedData(libcamera::Span<const uint8_t> buffer,
+ Metadata &metadata);
+ virtual void populateMetadata(const MdParser::RegisterMap &registers,
+ Metadata &metadata) const;
+
+ std::unique_ptr<MdParser> parser_;
+ CameraMode mode_;
+
+private:
+ /*
+ * Smallest difference between the frame length and integration time,
+ * in units of lines.
+ */
+ unsigned int frameIntegrationDiff_;
+};
+
+/*
+ * This is for registering camera helpers with the system, so that the
+ * CamHelper::create function picks them up automatically.
+ */
+
+typedef CamHelper *(*CamHelperCreateFunc)();
+struct RegisterCamHelper
+{
+ RegisterCamHelper(char const *camName,
+ CamHelperCreateFunc createFunc);
+};
+
+} /* namespace RPiController */
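
A hypothetical helper for an invented sensor name, sketched only to show the registration pattern described above: a file-scope RegisterCamHelper instance adds a factory to the helper map at load time, and CamHelper::create() later picks it by substring match on the camera name. The sensor name, gain model and frameIntegrationDiff below are all made up.

#include "cam_helper.h"

using namespace RPiController;

class CamHelperXyz123 : public CamHelper
{
public:
	CamHelperXyz123();
	uint32_t gainCode(double gain) const override;
	double gain(uint32_t gainCode) const override;

private:
	/* Assumed: 4 lines between the frame length and integration time. */
	static constexpr int frameIntegrationDiff = 4;
};

CamHelperXyz123::CamHelperXyz123()
	: CamHelper({}, frameIntegrationDiff) /* no embedded-data parser */
{
}

/* Invented linear gain model in 1/16 steps, purely for illustration. */
uint32_t CamHelperXyz123::gainCode(double gain) const
{
	return static_cast<uint32_t>(gain * 16.0);
}

double CamHelperXyz123::gain(uint32_t gainCode) const
{
	return static_cast<double>(gainCode) / 16.0;
}

static CamHelper *create()
{
	return new CamHelperXyz123();
}

/* The static initialiser registers the factory under the sensor name. */
static RegisterCamHelper reg("xyz123", &create);
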
diff --git a/src/ipa/raspberrypi/cam_helper_imx219.cpp b/src/ipa/rpi/cam_helper/cam_helper_imx219.cpp
index a3caab71..91461f7a 100644
--- a/src/ipa/raspberrypi/cam_helper_imx219.cpp
+++ b/src/ipa/rpi/cam_helper/cam_helper_imx219.cpp
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019, Raspberry Pi Ltd
*
- * cam_helper_imx219.cpp - camera helper for imx219 sensor
+ * camera helper for imx219 sensor
*/
#include <assert.h>
@@ -16,9 +16,9 @@
*/
#define ENABLE_EMBEDDED_DATA 0
-#include "cam_helper.hpp"
+#include "cam_helper.h"
#if ENABLE_EMBEDDED_DATA
-#include "md_parser.hpp"
+#include "md_parser.h"
#endif
using namespace RPiController;
@@ -32,17 +32,20 @@ constexpr uint32_t expHiReg = 0x15a;
constexpr uint32_t expLoReg = 0x15b;
constexpr uint32_t frameLengthHiReg = 0x160;
constexpr uint32_t frameLengthLoReg = 0x161;
+constexpr uint32_t lineLengthHiReg = 0x162;
+constexpr uint32_t lineLengthLoReg = 0x163;
constexpr std::initializer_list<uint32_t> registerList [[maybe_unused]]
- = { expHiReg, expLoReg, gainReg, frameLengthHiReg, frameLengthLoReg };
+ = { expHiReg, expLoReg, gainReg, frameLengthHiReg, frameLengthLoReg,
+ lineLengthHiReg, lineLengthLoReg };
class CamHelperImx219 : public CamHelper
{
public:
CamHelperImx219();
- uint32_t GainCode(double gain) const override;
- double Gain(uint32_t gain_code) const override;
- unsigned int MistrustFramesModeSwitch() const override;
- bool SensorEmbeddedDataPresent() const override;
+ uint32_t gainCode(double gain) const override;
+ double gain(uint32_t gainCode) const override;
+ unsigned int mistrustFramesModeSwitch() const override;
+ bool sensorEmbeddedDataPresent() const override;
private:
/*
@@ -51,7 +54,7 @@ private:
*/
static constexpr int frameIntegrationDiff = 4;
- void PopulateMetadata(const MdParser::RegisterMap &registers,
+ void populateMetadata(const MdParser::RegisterMap &registers,
Metadata &metadata) const override;
};
@@ -64,17 +67,17 @@ CamHelperImx219::CamHelperImx219()
{
}
-uint32_t CamHelperImx219::GainCode(double gain) const
+uint32_t CamHelperImx219::gainCode(double gain) const
{
return (uint32_t)(256 - 256 / gain);
}
-double CamHelperImx219::Gain(uint32_t gain_code) const
+double CamHelperImx219::gain(uint32_t gainCode) const
{
- return 256.0 / (256 - gain_code);
+ return 256.0 / (256 - gainCode);
}
-unsigned int CamHelperImx219::MistrustFramesModeSwitch() const
+unsigned int CamHelperImx219::mistrustFramesModeSwitch() const
{
/*
* For reasons unknown, we do occasionally get a bogus metadata frame
@@ -84,26 +87,29 @@ unsigned int CamHelperImx219::MistrustFramesModeSwitch() const
return 1;
}
-bool CamHelperImx219::SensorEmbeddedDataPresent() const
+bool CamHelperImx219::sensorEmbeddedDataPresent() const
{
return ENABLE_EMBEDDED_DATA;
}
-void CamHelperImx219::PopulateMetadata(const MdParser::RegisterMap &registers,
+void CamHelperImx219::populateMetadata(const MdParser::RegisterMap &registers,
Metadata &metadata) const
{
DeviceStatus deviceStatus;
- deviceStatus.shutter_speed = Exposure(registers.at(expHiReg) * 256 + registers.at(expLoReg));
- deviceStatus.analogue_gain = Gain(registers.at(gainReg));
- deviceStatus.frame_length = registers.at(frameLengthHiReg) * 256 + registers.at(frameLengthLoReg);
+ deviceStatus.lineLength = lineLengthPckToDuration(registers.at(lineLengthHiReg) * 256 +
+ registers.at(lineLengthLoReg));
+ deviceStatus.shutterSpeed = exposure(registers.at(expHiReg) * 256 + registers.at(expLoReg),
+ deviceStatus.lineLength);
+ deviceStatus.analogueGain = gain(registers.at(gainReg));
+ deviceStatus.frameLength = registers.at(frameLengthHiReg) * 256 + registers.at(frameLengthLoReg);
- metadata.Set("device.status", deviceStatus);
+ metadata.set("device.status", deviceStatus);
}
-static CamHelper *Create()
+static CamHelper *create()
{
return new CamHelperImx219();
}
-static RegisterCamHelper reg("imx219", &Create);
+static RegisterCamHelper reg("imx219", &create);
diff --git a/src/ipa/rpi/cam_helper/cam_helper_imx290.cpp b/src/ipa/rpi/cam_helper/cam_helper_imx290.cpp
new file mode 100644
index 00000000..24275e12
--- /dev/null
+++ b/src/ipa/rpi/cam_helper/cam_helper_imx290.cpp
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2021, Raspberry Pi Ltd
+ *
+ * camera helper for imx290 sensor
+ */
+
+#include <math.h>
+
+#include "cam_helper.h"
+
+using namespace RPiController;
+
+class CamHelperImx290 : public CamHelper
+{
+public:
+ CamHelperImx290();
+ uint32_t gainCode(double gain) const override;
+ double gain(uint32_t gainCode) const override;
+ void getDelays(int &exposureDelay, int &gainDelay,
+ int &vblankDelay, int &hblankDelay) const override;
+ unsigned int hideFramesStartup() const override;
+ unsigned int hideFramesModeSwitch() const override;
+
+private:
+ /*
+ * Smallest difference between the frame length and integration time,
+ * in units of lines.
+ */
+ static constexpr int frameIntegrationDiff = 2;
+};
+
+CamHelperImx290::CamHelperImx290()
+ : CamHelper({}, frameIntegrationDiff)
+{
+}
+
+uint32_t CamHelperImx290::gainCode(double gain) const
+{
+ int code = 66.6667 * log10(gain);
+ return std::max(0, std::min(code, 0xf0));
+}
+
+double CamHelperImx290::gain(uint32_t gainCode) const
+{
+ return pow(10, 0.015 * gainCode);
+}
+
+void CamHelperImx290::getDelays(int &exposureDelay, int &gainDelay,
+ int &vblankDelay, int &hblankDelay) const
+{
+ exposureDelay = 2;
+ gainDelay = 2;
+ vblankDelay = 2;
+ hblankDelay = 2;
+}
+
+unsigned int CamHelperImx290::hideFramesStartup() const
+{
+ /* On startup, we seem to get 1 bad frame. */
+ return 1;
+}
+
+unsigned int CamHelperImx290::hideFramesModeSwitch() const
+{
+ /* After a mode switch, we seem to get 1 bad frame. */
+ return 1;
+}
+
+static CamHelper *create()
+{
+ return new CamHelperImx290();
+}
+
+static RegisterCamHelper reg("imx290", &create);
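
A quick round-trip check of the log-domain gain mapping above; the 0.015 factor in gain() is just 1 / 66.6667, i.e. roughly 0.3 dB per gain code step. The values below are only a worked example.

#include <cmath>
#include <cstdio>

int main()
{
	const double gain = 2.0;
	/* gainCode(): 66.6667 * log10(2.0) = 20.07, truncated to 20. */
	const int code = static_cast<int>(66.6667 * std::log10(gain));
	/* gain(): 10^(0.015 * 20) = 10^0.3 = 1.995, close to the requested 2.0. */
	const double back = std::pow(10, 0.015 * code);
	std::printf("code %d, gain back %.3f\n", code, back);
	return 0;
}
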
diff --git a/src/ipa/rpi/cam_helper/cam_helper_imx296.cpp b/src/ipa/rpi/cam_helper/cam_helper_imx296.cpp
new file mode 100644
index 00000000..d4a4fa79
--- /dev/null
+++ b/src/ipa/rpi/cam_helper/cam_helper_imx296.cpp
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * Camera helper for IMX296 sensor
+ */
+
+#include <algorithm>
+#include <cmath>
+#include <stddef.h>
+
+#include "cam_helper.h"
+
+using namespace RPiController;
+using libcamera::utils::Duration;
+using namespace std::literals::chrono_literals;
+
+class CamHelperImx296 : public CamHelper
+{
+public:
+ CamHelperImx296();
+ uint32_t gainCode(double gain) const override;
+ double gain(uint32_t gainCode) const override;
+ uint32_t exposureLines(const Duration exposure, const Duration lineLength) const override;
+ Duration exposure(uint32_t exposureLines, const Duration lineLength) const override;
+ void getDelays(int &exposureDelay, int &gainDelay,
+ int &vblankDelay, int &hblankDelay) const override;
+
+private:
+ static constexpr uint32_t minExposureLines = 1;
+ static constexpr uint32_t maxGainCode = 239;
+ static constexpr Duration timePerLine = 550.0 / 37.125e6 * 1.0s;
+
+ /*
+ * Smallest difference between the frame length and integration time,
+ * in units of lines.
+ */
+ static constexpr int frameIntegrationDiff = 4;
+};
+
+CamHelperImx296::CamHelperImx296()
+ : CamHelper(nullptr, frameIntegrationDiff)
+{
+}
+
+uint32_t CamHelperImx296::gainCode(double gain) const
+{
+ uint32_t code = 20 * std::log10(gain) * 10;
+ return std::min(code, maxGainCode);
+}
+
+double CamHelperImx296::gain(uint32_t gainCode) const
+{
+ return std::pow(10.0, gainCode / 200.0);
+}
+
+uint32_t CamHelperImx296::exposureLines(const Duration exposure,
+ [[maybe_unused]] const Duration lineLength) const
+{
+ return std::max<uint32_t>(minExposureLines, (exposure - 14.26us) / timePerLine);
+}
+
+Duration CamHelperImx296::exposure(uint32_t exposureLines,
+ [[maybe_unused]] const Duration lineLength) const
+{
+ return std::max<uint32_t>(minExposureLines, exposureLines) * timePerLine + 14.26us;
+}
+
+void CamHelperImx296::getDelays(int &exposureDelay, int &gainDelay,
+ int &vblankDelay, int &hblankDelay) const
+{
+ exposureDelay = 2;
+ gainDelay = 2;
+ vblankDelay = 2;
+ hblankDelay = 2;
+}
+
+static CamHelper *create()
+{
+ return new CamHelperImx296();
+}
+
+static RegisterCamHelper reg("imx296", &create);
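
A small worked check of the fixed-line-time model above: timePerLine works out to 550 / 37.125 MHz, about 14.81 us per line, and the 14.26 us offset is subtracted before dividing. The 1 ms request below is an arbitrary example.

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main()
{
	const double timePerLineUs = 550.0 / 37.125; /* ~14.81 us per line */
	const double offsetUs = 14.26;
	const double exposureUs = 1000.0;            /* requested exposure: 1 ms */

	/* Same shape as CamHelperImx296::exposureLines(). */
	const uint32_t lines = std::max<uint32_t>(1,
		static_cast<uint32_t>((exposureUs - offsetUs) / timePerLineUs));
	std::printf("%u lines, %.1f us actual\n",
		    lines, lines * timePerLineUs + offsetUs); /* 66 lines, 992.0 us */
	return 0;
}
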
diff --git a/src/ipa/raspberrypi/cam_helper_imx477.cpp b/src/ipa/rpi/cam_helper/cam_helper_imx477.cpp
index 0e1c0dbd..6bd89334 100644
--- a/src/ipa/raspberrypi/cam_helper_imx477.cpp
+++ b/src/ipa/rpi/cam_helper/cam_helper_imx477.cpp
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2020, Raspberry Pi Ltd
*
- * cam_helper_imx477.cpp - camera helper for imx477 sensor
+ * camera helper for imx477 sensor
*/
#include <algorithm>
@@ -14,8 +14,8 @@
#include <libcamera/base/log.h>
-#include "cam_helper.hpp"
-#include "md_parser.hpp"
+#include "cam_helper.h"
+#include "md_parser.h"
using namespace RPiController;
using namespace libcamera;
@@ -35,22 +35,25 @@ constexpr uint32_t gainHiReg = 0x0204;
constexpr uint32_t gainLoReg = 0x0205;
constexpr uint32_t frameLengthHiReg = 0x0340;
constexpr uint32_t frameLengthLoReg = 0x0341;
+constexpr uint32_t lineLengthHiReg = 0x0342;
+constexpr uint32_t lineLengthLoReg = 0x0343;
constexpr uint32_t temperatureReg = 0x013a;
constexpr std::initializer_list<uint32_t> registerList =
- { expHiReg, expLoReg, gainHiReg, gainLoReg, frameLengthHiReg, frameLengthLoReg, temperatureReg };
+ { expHiReg, expLoReg, gainHiReg, gainLoReg, frameLengthHiReg, frameLengthLoReg,
+ lineLengthHiReg, lineLengthLoReg, temperatureReg };
class CamHelperImx477 : public CamHelper
{
public:
CamHelperImx477();
- uint32_t GainCode(double gain) const override;
- double Gain(uint32_t gain_code) const override;
- void Prepare(libcamera::Span<const uint8_t> buffer, Metadata &metadata) override;
- uint32_t GetVBlanking(Duration &exposure, Duration minFrameDuration,
- Duration maxFrameDuration) const override;
- void GetDelays(int &exposure_delay, int &gain_delay,
- int &vblank_delay) const override;
- bool SensorEmbeddedDataPresent() const override;
+ uint32_t gainCode(double gain) const override;
+ double gain(uint32_t gainCode) const override;
+ void prepare(libcamera::Span<const uint8_t> buffer, Metadata &metadata) override;
+ std::pair<uint32_t, uint32_t> getBlanking(Duration &exposure, Duration minFrameDuration,
+ Duration maxFrameDuration) const override;
+ void getDelays(int &exposureDelay, int &gainDelay,
+ int &vblankDelay, int &hblankDelay) const override;
+ bool sensorEmbeddedDataPresent() const override;
private:
/*
@@ -63,7 +66,7 @@ private:
/* Largest long exposure scale factor given as a left shift on the frame length. */
static constexpr int longExposureShiftMax = 7;
- void PopulateMetadata(const MdParser::RegisterMap &registers,
+ void populateMetadata(const MdParser::RegisterMap &registers,
Metadata &metadata) const override;
};
@@ -72,22 +75,22 @@ CamHelperImx477::CamHelperImx477()
{
}
-uint32_t CamHelperImx477::GainCode(double gain) const
+uint32_t CamHelperImx477::gainCode(double gain) const
{
return static_cast<uint32_t>(1024 - 1024 / gain);
}
-double CamHelperImx477::Gain(uint32_t gain_code) const
+double CamHelperImx477::gain(uint32_t gainCode) const
{
- return 1024.0 / (1024 - gain_code);
+ return 1024.0 / (1024 - gainCode);
}
-void CamHelperImx477::Prepare(libcamera::Span<const uint8_t> buffer, Metadata &metadata)
+void CamHelperImx477::prepare(libcamera::Span<const uint8_t> buffer, Metadata &metadata)
{
MdParser::RegisterMap registers;
DeviceStatus deviceStatus;
- if (metadata.Get("device.status", deviceStatus)) {
+ if (metadata.get("device.status", deviceStatus)) {
LOG(IPARPI, Error) << "DeviceStatus not found from DelayedControls";
return;
}
@@ -105,28 +108,32 @@ void CamHelperImx477::Prepare(libcamera::Span<const uint8_t> buffer, Metadata &m
* Otherwise, all values are updated with what is reported in the
* embedded data.
*/
- if (deviceStatus.frame_length > frameLengthMax) {
+ if (deviceStatus.frameLength > frameLengthMax) {
DeviceStatus parsedDeviceStatus;
- metadata.Get("device.status", parsedDeviceStatus);
- parsedDeviceStatus.shutter_speed = deviceStatus.shutter_speed;
- parsedDeviceStatus.frame_length = deviceStatus.frame_length;
- metadata.Set("device.status", parsedDeviceStatus);
+ metadata.get("device.status", parsedDeviceStatus);
+ parsedDeviceStatus.shutterSpeed = deviceStatus.shutterSpeed;
+ parsedDeviceStatus.frameLength = deviceStatus.frameLength;
+ metadata.set("device.status", parsedDeviceStatus);
LOG(IPARPI, Debug) << "Metadata updated for long exposure: "
<< parsedDeviceStatus;
}
}
-uint32_t CamHelperImx477::GetVBlanking(Duration &exposure,
- Duration minFrameDuration,
- Duration maxFrameDuration) const
+std::pair<uint32_t, uint32_t> CamHelperImx477::getBlanking(Duration &exposure,
+ Duration minFrameDuration,
+ Duration maxFrameDuration) const
{
uint32_t frameLength, exposureLines;
unsigned int shift = 0;
- frameLength = mode_.height + CamHelper::GetVBlanking(exposure, minFrameDuration,
- maxFrameDuration);
+ auto [vblank, hblank] = CamHelper::getBlanking(exposure, minFrameDuration,
+ maxFrameDuration);
+
+ frameLength = mode_.height + vblank;
+ Duration lineLength = hblankToLineLength(hblank);
+
/*
* Check if the frame length calculated needs to be setup for long
* exposure mode. This will require us to use a long exposure scale
@@ -144,43 +151,47 @@ uint32_t CamHelperImx477::GetVBlanking(Duration &exposure,
if (shift) {
/* Account for any rounding in the scaled frame length value. */
frameLength <<= shift;
- exposureLines = ExposureLines(exposure);
+ exposureLines = CamHelperImx477::exposureLines(exposure, lineLength);
exposureLines = std::min(exposureLines, frameLength - frameIntegrationDiff);
- exposure = Exposure(exposureLines);
+ exposure = CamHelperImx477::exposure(exposureLines, lineLength);
}
- return frameLength - mode_.height;
+ return { frameLength - mode_.height, hblank };
}
-void CamHelperImx477::GetDelays(int &exposure_delay, int &gain_delay,
- int &vblank_delay) const
+void CamHelperImx477::getDelays(int &exposureDelay, int &gainDelay,
+ int &vblankDelay, int &hblankDelay) const
{
- exposure_delay = 2;
- gain_delay = 2;
- vblank_delay = 3;
+ exposureDelay = 2;
+ gainDelay = 2;
+ vblankDelay = 3;
+ hblankDelay = 3;
}
-bool CamHelperImx477::SensorEmbeddedDataPresent() const
+bool CamHelperImx477::sensorEmbeddedDataPresent() const
{
return true;
}
-void CamHelperImx477::PopulateMetadata(const MdParser::RegisterMap &registers,
+void CamHelperImx477::populateMetadata(const MdParser::RegisterMap &registers,
Metadata &metadata) const
{
DeviceStatus deviceStatus;
- deviceStatus.shutter_speed = Exposure(registers.at(expHiReg) * 256 + registers.at(expLoReg));
- deviceStatus.analogue_gain = Gain(registers.at(gainHiReg) * 256 + registers.at(gainLoReg));
- deviceStatus.frame_length = registers.at(frameLengthHiReg) * 256 + registers.at(frameLengthLoReg);
- deviceStatus.sensor_temperature = std::clamp<int8_t>(registers.at(temperatureReg), -20, 80);
+ deviceStatus.lineLength = lineLengthPckToDuration(registers.at(lineLengthHiReg) * 256 +
+ registers.at(lineLengthLoReg));
+ deviceStatus.shutterSpeed = exposure(registers.at(expHiReg) * 256 + registers.at(expLoReg),
+ deviceStatus.lineLength);
+ deviceStatus.analogueGain = gain(registers.at(gainHiReg) * 256 + registers.at(gainLoReg));
+ deviceStatus.frameLength = registers.at(frameLengthHiReg) * 256 + registers.at(frameLengthLoReg);
+ deviceStatus.sensorTemperature = std::clamp<int8_t>(registers.at(temperatureReg), -20, 80);
- metadata.Set("device.status", deviceStatus);
+ metadata.set("device.status", deviceStatus);
}
-static CamHelper *Create()
+static CamHelper *create()
{
return new CamHelperImx477();
}
-static RegisterCamHelper reg("imx477", &Create);
+static RegisterCamHelper reg("imx477", &create);
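
A sketch of the long-exposure scaling loop used by getBlanking() in these helpers: when the computed frame length exceeds the sensor limit, it is repeatedly halved and the number of halvings becomes the long-exposure shift. The 150000-line request is invented; 0xffdc is the limit the imx708 helper below uses, assumed here for illustration.

#include <cstdint>
#include <cstdio>

int main()
{
	const uint32_t frameLengthMax = 0xffdc; /* 65500 lines, assumed limit */
	const int longExposureShiftMax = 7;

	uint32_t frameLength = 150000;          /* e.g. a multi-second exposure */
	unsigned int shift = 0;

	/* Same loop as getBlanking(): halve until it fits, counting the shift. */
	while (frameLength > frameLengthMax) {
		if (++shift > static_cast<unsigned int>(longExposureShiftMax)) {
			shift = longExposureShiftMax;
			frameLength = frameLengthMax;
			break;
		}
		frameLength >>= 1;
	}

	/* shift 2, scaled frame length 37500; 37500 << 2 restores 150000 lines. */
	std::printf("shift %u, scaled frame length %u, effective %u\n",
		    shift, frameLength, frameLength << shift);
	return 0;
}
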
diff --git a/src/ipa/raspberrypi/cam_helper_imx519.cpp b/src/ipa/rpi/cam_helper/cam_helper_imx519.cpp
index eaf24982..c2de3d40 100644
--- a/src/ipa/raspberrypi/cam_helper_imx519.cpp
+++ b/src/ipa/rpi/cam_helper/cam_helper_imx519.cpp
@@ -1,9 +1,9 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
* Based on cam_helper_imx477.cpp
- * Copyright (C) 2020, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2020, Raspberry Pi Ltd
*
- * cam_helper_imx519.cpp - camera helper for imx519 sensor
+ * camera helper for imx519 sensor
* Copyright (C) 2021, Arducam Technology co., Ltd.
*/
@@ -15,8 +15,8 @@
#include <libcamera/base/log.h>
-#include "cam_helper.hpp"
-#include "md_parser.hpp"
+#include "cam_helper.h"
+#include "md_parser.h"
using namespace RPiController;
using namespace libcamera;
@@ -36,21 +36,24 @@ constexpr uint32_t gainHiReg = 0x0204;
constexpr uint32_t gainLoReg = 0x0205;
constexpr uint32_t frameLengthHiReg = 0x0340;
constexpr uint32_t frameLengthLoReg = 0x0341;
+constexpr uint32_t lineLengthHiReg = 0x0342;
+constexpr uint32_t lineLengthLoReg = 0x0343;
constexpr std::initializer_list<uint32_t> registerList =
- { expHiReg, expLoReg, gainHiReg, gainLoReg, frameLengthHiReg, frameLengthLoReg };
+ { expHiReg, expLoReg, gainHiReg, gainLoReg, frameLengthHiReg, frameLengthLoReg,
+ lineLengthHiReg, lineLengthLoReg };
class CamHelperImx519 : public CamHelper
{
public:
CamHelperImx519();
- uint32_t GainCode(double gain) const override;
- double Gain(uint32_t gain_code) const override;
- void Prepare(libcamera::Span<const uint8_t> buffer, Metadata &metadata) override;
- uint32_t GetVBlanking(Duration &exposure, Duration minFrameDuration,
- Duration maxFrameDuration) const override;
- void GetDelays(int &exposure_delay, int &gain_delay,
- int &vblank_delay) const override;
- bool SensorEmbeddedDataPresent() const override;
+ uint32_t gainCode(double gain) const override;
+ double gain(uint32_t gainCode) const override;
+ void prepare(libcamera::Span<const uint8_t> buffer, Metadata &metadata) override;
+ std::pair<uint32_t, uint32_t> getBlanking(Duration &exposure, Duration minFrameDuration,
+ Duration maxFrameDuration) const override;
+ void getDelays(int &exposureDelay, int &gainDelay,
+ int &vblankDelay, int &hblankDelay) const override;
+ bool sensorEmbeddedDataPresent() const override;
private:
/*
@@ -63,7 +66,7 @@ private:
/* Largest long exposure scale factor given as a left shift on the frame length. */
static constexpr int longExposureShiftMax = 7;
- void PopulateMetadata(const MdParser::RegisterMap &registers,
+ void populateMetadata(const MdParser::RegisterMap &registers,
Metadata &metadata) const override;
};
@@ -72,22 +75,22 @@ CamHelperImx519::CamHelperImx519()
{
}
-uint32_t CamHelperImx519::GainCode(double gain) const
+uint32_t CamHelperImx519::gainCode(double gain) const
{
return static_cast<uint32_t>(1024 - 1024 / gain);
}
-double CamHelperImx519::Gain(uint32_t gain_code) const
+double CamHelperImx519::gain(uint32_t gainCode) const
{
- return 1024.0 / (1024 - gain_code);
+ return 1024.0 / (1024 - gainCode);
}
-void CamHelperImx519::Prepare(libcamera::Span<const uint8_t> buffer, Metadata &metadata)
+void CamHelperImx519::prepare(libcamera::Span<const uint8_t> buffer, Metadata &metadata)
{
MdParser::RegisterMap registers;
DeviceStatus deviceStatus;
- if (metadata.Get("device.status", deviceStatus)) {
+ if (metadata.get("device.status", deviceStatus)) {
LOG(IPARPI, Error) << "DeviceStatus not found from DelayedControls";
return;
}
@@ -105,28 +108,32 @@ void CamHelperImx519::Prepare(libcamera::Span<const uint8_t> buffer, Metadata &m
* Otherwise, all values are updated with what is reported in the
* embedded data.
*/
- if (deviceStatus.frame_length > frameLengthMax) {
+ if (deviceStatus.frameLength > frameLengthMax) {
DeviceStatus parsedDeviceStatus;
- metadata.Get("device.status", parsedDeviceStatus);
- parsedDeviceStatus.shutter_speed = deviceStatus.shutter_speed;
- parsedDeviceStatus.frame_length = deviceStatus.frame_length;
- metadata.Set("device.status", parsedDeviceStatus);
+ metadata.get("device.status", parsedDeviceStatus);
+ parsedDeviceStatus.shutterSpeed = deviceStatus.shutterSpeed;
+ parsedDeviceStatus.frameLength = deviceStatus.frameLength;
+ metadata.set("device.status", parsedDeviceStatus);
LOG(IPARPI, Debug) << "Metadata updated for long exposure: "
<< parsedDeviceStatus;
}
}
-uint32_t CamHelperImx519::GetVBlanking(Duration &exposure,
- Duration minFrameDuration,
- Duration maxFrameDuration) const
+std::pair<uint32_t, uint32_t> CamHelperImx519::getBlanking(Duration &exposure,
+ Duration minFrameDuration,
+ Duration maxFrameDuration) const
{
uint32_t frameLength, exposureLines;
unsigned int shift = 0;
- frameLength = mode_.height + CamHelper::GetVBlanking(exposure, minFrameDuration,
- maxFrameDuration);
+ auto [vblank, hblank] = CamHelper::getBlanking(exposure, minFrameDuration,
+ maxFrameDuration);
+
+ frameLength = mode_.height + vblank;
+ Duration lineLength = hblankToLineLength(hblank);
+
/*
* Check if the frame length calculated needs to be setup for long
* exposure mode. This will require us to use a long exposure scale
@@ -144,42 +151,46 @@ uint32_t CamHelperImx519::GetVBlanking(Duration &exposure,
if (shift) {
/* Account for any rounding in the scaled frame length value. */
frameLength <<= shift;
- exposureLines = ExposureLines(exposure);
+ exposureLines = CamHelperImx519::exposureLines(exposure, lineLength);
exposureLines = std::min(exposureLines, frameLength - frameIntegrationDiff);
- exposure = Exposure(exposureLines);
+ exposure = CamHelperImx519::exposure(exposureLines, lineLength);
}
- return frameLength - mode_.height;
+ return { frameLength - mode_.height, hblank };
}
-void CamHelperImx519::GetDelays(int &exposure_delay, int &gain_delay,
- int &vblank_delay) const
+void CamHelperImx519::getDelays(int &exposureDelay, int &gainDelay,
+ int &vblankDelay, int &hblankDelay) const
{
- exposure_delay = 2;
- gain_delay = 2;
- vblank_delay = 3;
+ exposureDelay = 2;
+ gainDelay = 2;
+ vblankDelay = 3;
+ hblankDelay = 3;
}
-bool CamHelperImx519::SensorEmbeddedDataPresent() const
+bool CamHelperImx519::sensorEmbeddedDataPresent() const
{
return true;
}
-void CamHelperImx519::PopulateMetadata(const MdParser::RegisterMap &registers,
+void CamHelperImx519::populateMetadata(const MdParser::RegisterMap &registers,
Metadata &metadata) const
{
DeviceStatus deviceStatus;
- deviceStatus.shutter_speed = Exposure(registers.at(expHiReg) * 256 + registers.at(expLoReg));
- deviceStatus.analogue_gain = Gain(registers.at(gainHiReg) * 256 + registers.at(gainLoReg));
- deviceStatus.frame_length = registers.at(frameLengthHiReg) * 256 + registers.at(frameLengthLoReg);
+ deviceStatus.lineLength = lineLengthPckToDuration(registers.at(lineLengthHiReg) * 256 +
+ registers.at(lineLengthLoReg));
+ deviceStatus.shutterSpeed = exposure(registers.at(expHiReg) * 256 + registers.at(expLoReg),
+ deviceStatus.lineLength);
+ deviceStatus.analogueGain = gain(registers.at(gainHiReg) * 256 + registers.at(gainLoReg));
+ deviceStatus.frameLength = registers.at(frameLengthHiReg) * 256 + registers.at(frameLengthLoReg);
- metadata.Set("device.status", deviceStatus);
+ metadata.set("device.status", deviceStatus);
}
-static CamHelper *Create()
+static CamHelper *create()
{
return new CamHelperImx519();
}
-static RegisterCamHelper reg("imx519", &Create);
+static RegisterCamHelper reg("imx519", &create);
diff --git a/src/ipa/rpi/cam_helper/cam_helper_imx708.cpp b/src/ipa/rpi/cam_helper/cam_helper_imx708.cpp
new file mode 100644
index 00000000..63ddb55e
--- /dev/null
+++ b/src/ipa/rpi/cam_helper/cam_helper_imx708.cpp
@@ -0,0 +1,382 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2022, Raspberry Pi Ltd
+ *
+ * camera helper for imx708 sensor
+ */
+
+#include <cmath>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <libcamera/base/log.h>
+
+#include "controller/pdaf_data.h"
+
+#include "cam_helper.h"
+#include "md_parser.h"
+
+using namespace RPiController;
+using namespace libcamera;
+using libcamera::utils::Duration;
+
+using namespace std::literals::chrono_literals;
+
+namespace libcamera {
+LOG_DECLARE_CATEGORY(IPARPI)
+}
+
+/*
+ * We care about two gain registers and a pair of exposure registers. Their
+ * I2C addresses from the Sony imx708 datasheet:
+ */
+constexpr uint32_t expHiReg = 0x0202;
+constexpr uint32_t expLoReg = 0x0203;
+constexpr uint32_t gainHiReg = 0x0204;
+constexpr uint32_t gainLoReg = 0x0205;
+constexpr uint32_t frameLengthHiReg = 0x0340;
+constexpr uint32_t frameLengthLoReg = 0x0341;
+constexpr uint32_t lineLengthHiReg = 0x0342;
+constexpr uint32_t lineLengthLoReg = 0x0343;
+constexpr uint32_t temperatureReg = 0x013a;
+constexpr std::initializer_list<uint32_t> registerList =
+ { expHiReg, expLoReg, gainHiReg, gainLoReg, lineLengthHiReg,
+ lineLengthLoReg, frameLengthHiReg, frameLengthLoReg, temperatureReg };
+
+class CamHelperImx708 : public CamHelper
+{
+public:
+ CamHelperImx708();
+ uint32_t gainCode(double gain) const override;
+ double gain(uint32_t gain_code) const override;
+ void prepare(libcamera::Span<const uint8_t> buffer, Metadata &metadata) override;
+ void process(StatisticsPtr &stats, Metadata &metadata) override;
+ std::pair<uint32_t, uint32_t> getBlanking(Duration &exposure, Duration minFrameDuration,
+ Duration maxFrameDuration) const override;
+ void getDelays(int &exposureDelay, int &gainDelay,
+ int &vblankDelay, int &hblankDelay) const override;
+ bool sensorEmbeddedDataPresent() const override;
+ double getModeSensitivity(const CameraMode &mode) const override;
+ unsigned int hideFramesModeSwitch() const override;
+ unsigned int hideFramesStartup() const override;
+
+private:
+ /*
+ * Smallest difference between the frame length and integration time,
+ * in units of lines.
+ */
+ static constexpr int frameIntegrationDiff = 22;
+ /* Maximum frame length allowable for long exposure calculations. */
+ static constexpr int frameLengthMax = 0xffdc;
+ /* Largest long exposure scale factor given as a left shift on the frame length. */
+ static constexpr int longExposureShiftMax = 7;
+
+ static constexpr int pdafStatsRows = 12;
+ static constexpr int pdafStatsCols = 16;
+
+ void populateMetadata(const MdParser::RegisterMap &registers,
+ Metadata &metadata) const override;
+
+ static bool parsePdafData(const uint8_t *ptr, size_t len, unsigned bpp,
+ PdafRegions &pdaf);
+
+ bool parseAEHist(const uint8_t *ptr, size_t len, unsigned bpp);
+ void putAGCStatistics(StatisticsPtr stats);
+
+ Histogram aeHistLinear_;
+ uint32_t aeHistAverage_;
+ bool aeHistValid_;
+};
+
+CamHelperImx708::CamHelperImx708()
+ : CamHelper(std::make_unique<MdParserSmia>(registerList), frameIntegrationDiff),
+ aeHistLinear_{}, aeHistAverage_(0), aeHistValid_(false)
+{
+}
+
+uint32_t CamHelperImx708::gainCode(double gain) const
+{
+ return static_cast<uint32_t>(1024 - 1024 / gain);
+}
+
+double CamHelperImx708::gain(uint32_t gain_code) const
+{
+ return 1024.0 / (1024 - gain_code);
+}
+
+void CamHelperImx708::prepare(libcamera::Span<const uint8_t> buffer, Metadata &metadata)
+{
+ MdParser::RegisterMap registers;
+ DeviceStatus deviceStatus;
+
+ LOG(IPARPI, Debug) << "Embedded buffer size: " << buffer.size();
+
+ if (metadata.get("device.status", deviceStatus)) {
+ LOG(IPARPI, Error) << "DeviceStatus not found from DelayedControls";
+ return;
+ }
+
+ parseEmbeddedData(buffer, metadata);
+
+ /*
+ * Parse PDAF data, which we expect to occupy the third scanline
+ * of embedded data. As PDAF is quite sensor-specific, it's parsed here.
+ */
+ size_t bytesPerLine = (mode_.width * mode_.bitdepth) >> 3;
+
+ if (buffer.size() > 2 * bytesPerLine) {
+ PdafRegions pdaf;
+ if (parsePdafData(&buffer[2 * bytesPerLine],
+ buffer.size() - 2 * bytesPerLine,
+ mode_.bitdepth, pdaf))
+ metadata.set("pdaf.regions", pdaf);
+ }
+
+ /* Parse AE-HIST data where present */
+ if (buffer.size() > 3 * bytesPerLine) {
+ aeHistValid_ = parseAEHist(&buffer[3 * bytesPerLine],
+ buffer.size() - 3 * bytesPerLine,
+ mode_.bitdepth);
+ }
+
+ /*
+ * The DeviceStatus struct is first populated with values obtained from
+ * DelayedControls. If this reports frame length is > frameLengthMax,
+ * it means we are using a long exposure mode. Since the long exposure
+ * scale factor is not returned back through embedded data, we must rely
+ * on the existing exposure lines and frame length values returned by
+ * DelayedControls.
+ *
+ * Otherwise, all values are updated with what is reported in the
+ * embedded data.
+ */
+ if (deviceStatus.frameLength > frameLengthMax) {
+ DeviceStatus parsedDeviceStatus;
+
+ metadata.get("device.status", parsedDeviceStatus);
+ parsedDeviceStatus.shutterSpeed = deviceStatus.shutterSpeed;
+ parsedDeviceStatus.frameLength = deviceStatus.frameLength;
+ metadata.set("device.status", parsedDeviceStatus);
+
+ LOG(IPARPI, Debug) << "Metadata updated for long exposure: "
+ << parsedDeviceStatus;
+ }
+}
+
+void CamHelperImx708::process(StatisticsPtr &stats, [[maybe_unused]] Metadata &metadata)
+{
+ if (aeHistValid_)
+ putAGCStatistics(stats);
+}
+
+std::pair<uint32_t, uint32_t> CamHelperImx708::getBlanking(Duration &exposure,
+ Duration minFrameDuration,
+ Duration maxFrameDuration) const
+{
+ uint32_t frameLength, exposureLines;
+ unsigned int shift = 0;
+
+ auto [vblank, hblank] = CamHelper::getBlanking(exposure, minFrameDuration,
+ maxFrameDuration);
+
+ frameLength = mode_.height + vblank;
+ Duration lineLength = hblankToLineLength(hblank);
+
+ /*
+ * Check if the frame length calculated needs to be setup for long
+ * exposure mode. This will require us to use a long exposure scale
+ * factor provided by a shift operation in the sensor.
+ */
+ while (frameLength > frameLengthMax) {
+ if (++shift > longExposureShiftMax) {
+ shift = longExposureShiftMax;
+ frameLength = frameLengthMax;
+ break;
+ }
+ frameLength >>= 1;
+ }
+
+ if (shift) {
+ /* Account for any rounding in the scaled frame length value. */
+ frameLength <<= shift;
+ exposureLines = CamHelper::exposureLines(exposure, lineLength);
+ exposureLines = std::min(exposureLines, frameLength - frameIntegrationDiff);
+ exposure = CamHelper::exposure(exposureLines, lineLength);
+ }
+
+ return { frameLength - mode_.height, hblank };
+}
+
+void CamHelperImx708::getDelays(int &exposureDelay, int &gainDelay,
+ int &vblankDelay, int &hblankDelay) const
+{
+ exposureDelay = 2;
+ gainDelay = 2;
+ vblankDelay = 3;
+ hblankDelay = 3;
+}
+
+bool CamHelperImx708::sensorEmbeddedDataPresent() const
+{
+ return true;
+}
+
+double CamHelperImx708::getModeSensitivity(const CameraMode &mode) const
+{
+ /* In binned modes, sensitivity increases by a factor of 2 */
+ return (mode.width > 2304) ? 1.0 : 2.0;
+}
+
+unsigned int CamHelperImx708::hideFramesModeSwitch() const
+{
+ /*
+ * We need to drop the first startup frame in HDR mode.
+ * Unfortunately, the only way to currently determine whether the sensor
+ * is in HDR mode is to match the resolution and framerate - the HDR
+ * mode only runs up to 30fps.
+ */
+ if (mode_.width == 2304 && mode_.height == 1296 &&
+ mode_.minFrameDuration > 1.0s / 32)
+ return 1;
+ else
+ return 0;
+}
+
+unsigned int CamHelperImx708::hideFramesStartup() const
+{
+ return hideFramesModeSwitch();
+}
+
+void CamHelperImx708::populateMetadata(const MdParser::RegisterMap &registers,
+ Metadata &metadata) const
+{
+ DeviceStatus deviceStatus;
+
+ deviceStatus.lineLength = lineLengthPckToDuration(registers.at(lineLengthHiReg) * 256 +
+ registers.at(lineLengthLoReg));
+ deviceStatus.shutterSpeed = exposure(registers.at(expHiReg) * 256 + registers.at(expLoReg),
+ deviceStatus.lineLength);
+ deviceStatus.analogueGain = gain(registers.at(gainHiReg) * 256 + registers.at(gainLoReg));
+ deviceStatus.frameLength = registers.at(frameLengthHiReg) * 256 + registers.at(frameLengthLoReg);
+ deviceStatus.sensorTemperature = std::clamp<int8_t>(registers.at(temperatureReg), -20, 80);
+
+ metadata.set("device.status", deviceStatus);
+}
+
+bool CamHelperImx708::parsePdafData(const uint8_t *ptr, size_t len,
+ unsigned bpp, PdafRegions &pdaf)
+{
+ size_t step = bpp >> 1; /* bytes per PDAF grid entry */
+
+ if (bpp < 10 || bpp > 14 || len < 194 * step || ptr[0] != 0 || ptr[1] >= 0x40) {
+ LOG(IPARPI, Error) << "PDAF data in unsupported format";
+ return false;
+ }
+
+ pdaf.init({ pdafStatsCols, pdafStatsRows });
+
+ ptr += 2 * step;
+ for (unsigned i = 0; i < pdafStatsRows; ++i) {
+ for (unsigned j = 0; j < pdafStatsCols; ++j) {
+ unsigned c = (ptr[0] << 3) | (ptr[1] >> 5);
+ int p = (((ptr[1] & 0x0F) - (ptr[1] & 0x10)) << 6) | (ptr[2] >> 2);
+ PdafData pdafData;
+ pdafData.conf = c;
+ pdafData.phase = c ? p : 0;
+ pdaf.set(libcamera::Point(j, i), { pdafData, 1, 0 });
+ ptr += step;
+ }
+ }
+
+ return true;
+}
+
+bool CamHelperImx708::parseAEHist(const uint8_t *ptr, size_t len, unsigned bpp)
+{
+ static constexpr unsigned int PipelineBits = Statistics::NormalisationFactorPow2;
+
+ uint64_t count = 0, sum = 0;
+ size_t step = bpp >> 1; /* bytes per histogram bin */
+ uint32_t hist[128];
+
+ if (len < 144 * step)
+ return false;
+
+ /*
+ * Read the 128 bin linear histogram, which by default covers
+ * the full range of the HDR shortest exposure (small values are
+ * expected to dominate, so pixel-value resolution will be poor).
+ */
+ for (unsigned i = 0; i < 128; ++i) {
+ if (ptr[3] != 0x55)
+ return false;
+ uint32_t c = (ptr[0] << 14) + (ptr[1] << 6) + (ptr[2] >> 2);
+ hist[i] = c >> 2; /* pixels to quads */
+ if (i != 0) {
+ count += c;
+ sum += c *
+ (i * (1u << (PipelineBits - 7)) +
+ (1u << (PipelineBits - 8)));
+ }
+ ptr += step;
+ }
+
+ /*
+ * Now use the first 9 bins of the log histogram (these should be
+ * subdivisions of the smallest linear bin), to get a more accurate
+ * average value. Don't assume that AEHIST1_AVERAGE is present.
+ */
+ for (unsigned i = 0; i < 9; ++i) {
+ if (ptr[3] != 0x55)
+ return false;
+ uint32_t c = (ptr[0] << 14) + (ptr[1] << 6) + (ptr[2] >> 2);
+ count += c;
+ sum += c *
+ ((3u << PipelineBits) >> (17 - i));
+ ptr += step;
+ }
+ if ((unsigned)((ptr[0] << 12) + (ptr[1] << 4) + (ptr[2] >> 4)) !=
+ hist[1]) {
+ LOG(IPARPI, Error) << "Lin/Log histogram mismatch";
+ return false;
+ }
+
+ aeHistLinear_ = Histogram(hist, 128);
+ aeHistAverage_ = count ? (sum / count) : 0;
+
+ return count != 0;
+}
+
+void CamHelperImx708::putAGCStatistics(StatisticsPtr stats)
+{
+ /*
+ * For HDR mode, copy sensor's AE/AGC statistics over ISP's, so the
+ * AGC algorithm sees a linear response to exposure and gain changes.
+ *
+ * Histogram: Just copy the "raw" histogram over the tone-mapped one,
+ * although they have different distributions (raw values are lower).
+ * Tuning should either ignore it, or constrain for highlights only.
+ *
+ * Average: Overwrite all regional averages with a global raw average,
+ * scaled by a fiddle-factor so that a conventional (non-HDR) y_target
+ * of e.g. 0.17 will map to a suitable level for HDR.
+ */
+ stats->yHist = aeHistLinear_;
+
+ constexpr unsigned int HdrHeadroomFactor = 4;
+ uint64_t v = HdrHeadroomFactor * aeHistAverage_;
+ for (auto &region : stats->agcRegions) {
+ region.val.rSum = region.val.gSum = region.val.bSum = region.counted * v;
+ }
+}
+
+static CamHelper *create()
+{
+ return new CamHelperImx708();
+}
+
+static RegisterCamHelper reg("imx708", &create);
+static RegisterCamHelper regWide("imx708_wide", &create);
+static RegisterCamHelper regNoIr("imx708_noir", &create);
+static RegisterCamHelper regWideNoIr("imx708_wide_noir", &create);
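
A standalone illustration of the bit unpacking in parsePdafData() above: each grid entry carries an 11-bit confidence and a signed 11-bit phase across three bytes, and the (ptr[1] & 0x0F) - (ptr[1] & 0x10) subtraction sign-extends the 5-bit high part of the phase. The example bytes are invented.

#include <cstdint>
#include <cstdio>

int main()
{
	/* Invented example bytes for one PDAF grid entry. */
	const uint8_t b0 = 0x12, b1 = 0xf4, b2 = 0x8c;

	/* Confidence: 11 bits, the 8 bits of b0 plus the top 3 bits of b1. */
	const unsigned conf = (b0 << 3) | (b1 >> 5);

	/*
	 * Phase: signed 11 bits. Bits [4:0] of b1 form the signed high part
	 * (bit 4 is the sign), bits [7:2] of b2 the low 6 bits; subtracting
	 * the 0x10 bit performs the sign extension.
	 */
	const int phase = (((b1 & 0x0f) - (b1 & 0x10)) << 6) | (b2 >> 2);

	std::printf("conf %u, phase %d\n", conf, phase); /* conf 151, phase -733 */
	return 0;
}
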
diff --git a/src/ipa/raspberrypi/cam_helper_ov5647.cpp b/src/ipa/rpi/cam_helper/cam_helper_ov5647.cpp
index 702c2d07..c30b017c 100644
--- a/src/ipa/raspberrypi/cam_helper_ov5647.cpp
+++ b/src/ipa/rpi/cam_helper/cam_helper_ov5647.cpp
@@ -1,13 +1,13 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019, Raspberry Pi Ltd
*
- * cam_helper_ov5647.cpp - camera information for ov5647 sensor
+ * camera information for ov5647 sensor
*/
#include <assert.h>
-#include "cam_helper.hpp"
+#include "cam_helper.h"
using namespace RPiController;
@@ -15,14 +15,14 @@ class CamHelperOv5647 : public CamHelper
{
public:
CamHelperOv5647();
- uint32_t GainCode(double gain) const override;
- double Gain(uint32_t gain_code) const override;
- void GetDelays(int &exposure_delay, int &gain_delay,
- int &vblank_delay) const override;
- unsigned int HideFramesStartup() const override;
- unsigned int HideFramesModeSwitch() const override;
- unsigned int MistrustFramesStartup() const override;
- unsigned int MistrustFramesModeSwitch() const override;
+ uint32_t gainCode(double gain) const override;
+ double gain(uint32_t gainCode) const override;
+ void getDelays(int &exposureDelay, int &gainDelay,
+ int &vblankDelay, int &hblankDelay) const override;
+ unsigned int hideFramesStartup() const override;
+ unsigned int hideFramesModeSwitch() const override;
+ unsigned int mistrustFramesStartup() const override;
+ unsigned int mistrustFramesModeSwitch() const override;
private:
/*
@@ -42,29 +42,30 @@ CamHelperOv5647::CamHelperOv5647()
{
}
-uint32_t CamHelperOv5647::GainCode(double gain) const
+uint32_t CamHelperOv5647::gainCode(double gain) const
{
return static_cast<uint32_t>(gain * 16.0);
}
-double CamHelperOv5647::Gain(uint32_t gain_code) const
+double CamHelperOv5647::gain(uint32_t gainCode) const
{
- return static_cast<double>(gain_code) / 16.0;
+ return static_cast<double>(gainCode) / 16.0;
}
-void CamHelperOv5647::GetDelays(int &exposure_delay, int &gain_delay,
- int &vblank_delay) const
+void CamHelperOv5647::getDelays(int &exposureDelay, int &gainDelay,
+ int &vblankDelay, int &hblankDelay) const
{
/*
* We run this sensor in a mode where the gain delay is bumped up to
* 2. It seems to be the only way to make the delays "predictable".
*/
- exposure_delay = 2;
- gain_delay = 2;
- vblank_delay = 2;
+ exposureDelay = 2;
+ gainDelay = 2;
+ vblankDelay = 2;
+ hblankDelay = 2;
}
-unsigned int CamHelperOv5647::HideFramesStartup() const
+unsigned int CamHelperOv5647::hideFramesStartup() const
{
/*
* On startup, we get a couple of under-exposed frames which
@@ -73,7 +74,7 @@ unsigned int CamHelperOv5647::HideFramesStartup() const
return 2;
}
-unsigned int CamHelperOv5647::HideFramesModeSwitch() const
+unsigned int CamHelperOv5647::hideFramesModeSwitch() const
{
/*
* After a mode switch, we get a couple of under-exposed frames which
@@ -82,7 +83,7 @@ unsigned int CamHelperOv5647::HideFramesModeSwitch() const
return 2;
}
-unsigned int CamHelperOv5647::MistrustFramesStartup() const
+unsigned int CamHelperOv5647::mistrustFramesStartup() const
{
/*
* First couple of frames are under-exposed and are no good for control
@@ -91,7 +92,7 @@ unsigned int CamHelperOv5647::MistrustFramesStartup() const
return 2;
}
-unsigned int CamHelperOv5647::MistrustFramesModeSwitch() const
+unsigned int CamHelperOv5647::mistrustFramesModeSwitch() const
{
/*
* First couple of frames are under-exposed even after a simple
@@ -100,9 +101,9 @@ unsigned int CamHelperOv5647::MistrustFramesModeSwitch() const
return 2;
}
-static CamHelper *Create()
+static CamHelper *create()
{
return new CamHelperOv5647();
}
-static RegisterCamHelper reg("ov5647", &Create);
+static RegisterCamHelper reg("ov5647", &create);
diff --git a/src/ipa/rpi/cam_helper/cam_helper_ov64a40.cpp b/src/ipa/rpi/cam_helper/cam_helper_ov64a40.cpp
new file mode 100644
index 00000000..a8efd389
--- /dev/null
+++ b/src/ipa/rpi/cam_helper/cam_helper_ov64a40.cpp
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2021, Raspberry Pi Ltd
+ * Copyright (C) 2023, Ideas on Board Oy.
+ *
+ * camera information for ov64a40 sensor
+ */
+
+#include <assert.h>
+
+#include "cam_helper.h"
+
+using namespace RPiController;
+
+class CamHelperOv64a40 : public CamHelper
+{
+public:
+ CamHelperOv64a40();
+ uint32_t gainCode(double gain) const override;
+ double gain(uint32_t gainCode) const override;
+ void getDelays(int &exposureDelay, int &gainDelay,
+ int &vblankDelay, int &hblankDelay) const override;
+ double getModeSensitivity(const CameraMode &mode) const override;
+
+private:
+ /*
+ * Smallest difference between the frame length and integration time,
+ * in units of lines.
+ */
+ static constexpr int frameIntegrationDiff = 32;
+};
+
+CamHelperOv64a40::CamHelperOv64a40()
+ : CamHelper({}, frameIntegrationDiff)
+{
+}
+
+uint32_t CamHelperOv64a40::gainCode(double gain) const
+{
+ return static_cast<uint32_t>(gain * 128.0);
+}
+
+double CamHelperOv64a40::gain(uint32_t gainCode) const
+{
+ return static_cast<double>(gainCode) / 128.0;
+}
+
+void CamHelperOv64a40::getDelays(int &exposureDelay, int &gainDelay,
+ int &vblankDelay, int &hblankDelay) const
+{
+ /* The driver appears to behave as follows: */
+ exposureDelay = 2;
+ gainDelay = 2;
+ vblankDelay = 2;
+ hblankDelay = 2;
+}
+
+double CamHelperOv64a40::getModeSensitivity(const CameraMode &mode) const
+{
+ if (mode.binX >= 2 && mode.scaleX >= 4) {
+ return 4.0;
+ } else if (mode.binX >= 2 && mode.scaleX >= 2) {
+ return 2.0;
+ } else {
+ return 1.0;
+ }
+}
+
+static CamHelper *create()
+{
+ return new CamHelperOv64a40();
+}
+
+static RegisterCamHelper reg("ov64a40", &create);
diff --git a/src/ipa/rpi/cam_helper/cam_helper_ov9281.cpp b/src/ipa/rpi/cam_helper/cam_helper_ov9281.cpp
new file mode 100644
index 00000000..a65c8ac0
--- /dev/null
+++ b/src/ipa/rpi/cam_helper/cam_helper_ov9281.cpp
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2021, Raspberry Pi Ltd
+ *
+ * camera information for ov9281 sensor
+ */
+
+#include <assert.h>
+
+#include "cam_helper.h"
+
+using namespace RPiController;
+
+class CamHelperOv9281 : public CamHelper
+{
+public:
+ CamHelperOv9281();
+ uint32_t gainCode(double gain) const override;
+ double gain(uint32_t gainCode) const override;
+ void getDelays(int &exposureDelay, int &gainDelay,
+ int &vblankDelay, int &hblankDelay) const override;
+
+private:
+ /*
+ * Smallest difference between the frame length and integration time,
+ * in units of lines.
+ */
+ static constexpr int frameIntegrationDiff = 4;
+};
+
+/*
+ * OV9281 doesn't output metadata, so we have to use the "unicam parser" which
+ * works by counting frames.
+ */
+
+CamHelperOv9281::CamHelperOv9281()
+ : CamHelper({}, frameIntegrationDiff)
+{
+}
+
+uint32_t CamHelperOv9281::gainCode(double gain) const
+{
+ return static_cast<uint32_t>(gain * 16.0);
+}
+
+double CamHelperOv9281::gain(uint32_t gainCode) const
+{
+ return static_cast<double>(gainCode) / 16.0;
+}
+
+void CamHelperOv9281::getDelays(int &exposureDelay, int &gainDelay,
+ int &vblankDelay, int &hblankDelay) const
+{
+ /* The driver appears to behave as follows: */
+ exposureDelay = 2;
+ gainDelay = 2;
+ vblankDelay = 2;
+ hblankDelay = 2;
+}
+
+static CamHelper *create()
+{
+ return new CamHelperOv9281();
+}
+
+static RegisterCamHelper reg("ov9281", &create);
diff --git a/src/ipa/raspberrypi/md_parser.hpp b/src/ipa/rpi/cam_helper/md_parser.h
index d32d0f54..227c376c 100644
--- a/src/ipa/raspberrypi/md_parser.hpp
+++ b/src/ipa/rpi/cam_helper/md_parser.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019, Raspberry Pi Ltd
*
- * md_parser.hpp - image sensor metadata parser interface
+ * image sensor metadata parser interface
*/
#pragma once
@@ -75,40 +75,40 @@ public:
};
MdParser()
- : reset_(true), bits_per_pixel_(0), num_lines_(0), line_length_bytes_(0)
+ : reset_(true), bitsPerPixel_(0), numLines_(0), lineLengthBytes_(0)
{
}
virtual ~MdParser() = default;
- void Reset()
+ void reset()
{
reset_ = true;
}
- void SetBitsPerPixel(int bpp)
+ void setBitsPerPixel(int bpp)
{
- bits_per_pixel_ = bpp;
+ bitsPerPixel_ = bpp;
}
- void SetNumLines(unsigned int num_lines)
+ void setNumLines(unsigned int numLines)
{
- num_lines_ = num_lines;
+ numLines_ = numLines;
}
- void SetLineLengthBytes(unsigned int num_bytes)
+ void setLineLengthBytes(unsigned int numBytes)
{
- line_length_bytes_ = num_bytes;
+ lineLengthBytes_ = numBytes;
}
- virtual Status Parse(libcamera::Span<const uint8_t> buffer,
+ virtual Status parse(libcamera::Span<const uint8_t> buffer,
RegisterMap &registers) = 0;
protected:
bool reset_;
- int bits_per_pixel_;
- unsigned int num_lines_;
- unsigned int line_length_bytes_;
+ int bitsPerPixel_;
+ unsigned int numLines_;
+ unsigned int lineLengthBytes_;
};
/*
@@ -123,7 +123,7 @@ class MdParserSmia final : public MdParser
public:
MdParserSmia(std::initializer_list<uint32_t> registerList);
- MdParser::Status Parse(libcamera::Span<const uint8_t> buffer,
+ MdParser::Status parse(libcamera::Span<const uint8_t> buffer,
RegisterMap &registers) override;
private:
@@ -133,18 +133,18 @@ private:
/*
* Note that error codes > 0 are regarded as non-fatal; codes < 0
* indicate a bad data buffer. Status codes are:
- * PARSE_OK - found all registers, much happiness
- * MISSING_REGS - some registers found; should this be a hard error?
+ * ParseOk - found all registers, much happiness
+ * MissingRegs - some registers found; should this be a hard error?
* The remaining codes are all hard errors.
*/
enum ParseStatus {
- PARSE_OK = 0,
- MISSING_REGS = 1,
- NO_LINE_START = -1,
- ILLEGAL_TAG = -2,
- BAD_DUMMY = -3,
- BAD_LINE_END = -4,
- BAD_PADDING = -5
+ ParseOk = 0,
+ MissingRegs = 1,
+ NoLineStart = -1,
+ IllegalTag = -2,
+ BadDummy = -3,
+ BadLineEnd = -4,
+ BadPadding = -5
};
ParseStatus findRegs(libcamera::Span<const uint8_t> buffer);
@@ -152,4 +152,4 @@ private:
OffsetMap offsets_;
};
-} // namespace RPi
+} /* namespace RPi */
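
After the rename, typical use of the parser (in practice from inside a CamHelper) is to set the geometry once per mode and then call parse() on each frame's embedded data buffer. A sketch under the assumption that the md_parser.h declarations above are available; the register addresses, embeddedBuffer and lineLengthBytes are illustrative placeholders, not values from this patch:

/* Illustrative only; 0x0157 / 0x015a / 0x015b are example SMIA register addresses. */
MdParserSmia parser({ 0x0157, 0x015a, 0x015b });
parser.setBitsPerPixel(10);
parser.setNumLines(2);
parser.setLineLengthBytes(lineLengthBytes);
parser.reset();                       /* force a full register search on the next parse() */

MdParser::RegisterMap registers;
if (parser.parse(embeddedBuffer, registers) == MdParser::OK) {
	uint32_t gainCode = registers[0x0157];
	/* ... convert via the sensor helper's gain() ... */
}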
diff --git a/src/ipa/rpi/cam_helper/md_parser_smia.cpp b/src/ipa/rpi/cam_helper/md_parser_smia.cpp
new file mode 100644
index 00000000..c7bdcf94
--- /dev/null
+++ b/src/ipa/rpi/cam_helper/md_parser_smia.cpp
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019-2021, Raspberry Pi Ltd
+ *
+ * SMIA specification based embedded data parser
+ */
+
+#include <libcamera/base/log.h>
+#include "md_parser.h"
+
+using namespace RPiController;
+using namespace libcamera;
+
+/*
+ * This function goes through the embedded data to find the offsets (not
+ * values!), in the data block, where the values of the given registers can
+ * subsequently be found.
+ *
+ * Embedded data tag bytes, from Sony IMX219 datasheet but general to all SMIA
+ * sensors, I think.
+ */
+
+constexpr unsigned int LineStart = 0x0a;
+constexpr unsigned int LineEndTag = 0x07;
+constexpr unsigned int RegHiBits = 0xaa;
+constexpr unsigned int RegLowBits = 0xa5;
+constexpr unsigned int RegValue = 0x5a;
+constexpr unsigned int RegSkip = 0x55;
+
+MdParserSmia::MdParserSmia(std::initializer_list<uint32_t> registerList)
+{
+ for (auto r : registerList)
+ offsets_[r] = {};
+}
+
+MdParser::Status MdParserSmia::parse(libcamera::Span<const uint8_t> buffer,
+ RegisterMap &registers)
+{
+ if (reset_) {
+ /*
+ * Search again through the metadata for all the registers
+ * requested.
+ */
+ ASSERT(bitsPerPixel_);
+
+ for (const auto &kv : offsets_)
+ offsets_[kv.first] = {};
+
+ ParseStatus ret = findRegs(buffer);
+ /*
+ * > 0 means "worked partially but parse again next time",
+ * < 0 means "hard error".
+ *
+ * In either case, we retry parsing on the next frame.
+ */
+ if (ret != ParseOk)
+ return ERROR;
+
+ reset_ = false;
+ }
+
+ /* Populate the register values requested. */
+ registers.clear();
+ for (const auto &[reg, offset] : offsets_) {
+ if (!offset) {
+ reset_ = true;
+ return NOTFOUND;
+ }
+ registers[reg] = buffer[offset.value()];
+ }
+
+ return OK;
+}
+
+MdParserSmia::ParseStatus MdParserSmia::findRegs(libcamera::Span<const uint8_t> buffer)
+{
+ ASSERT(offsets_.size());
+
+ if (buffer[0] != LineStart)
+ return NoLineStart;
+
+ unsigned int currentOffset = 1; /* after the LineStart */
+ unsigned int currentLineStart = 0, currentLine = 0;
+ unsigned int regNum = 0, regsDone = 0;
+
+ while (1) {
+ int tag = buffer[currentOffset++];
+
+ /* Non-dummy bytes come in even-sized blocks: skip can only ever follow tag */
+ while ((bitsPerPixel_ == 10 &&
+ (currentOffset + 1 - currentLineStart) % 5 == 0) ||
+ (bitsPerPixel_ == 12 &&
+ (currentOffset + 1 - currentLineStart) % 3 == 0) ||
+ (bitsPerPixel_ == 14 &&
+ (currentOffset - currentLineStart) % 7 >= 4)) {
+ if (buffer[currentOffset++] != RegSkip)
+ return BadDummy;
+ }
+
+ int dataByte = buffer[currentOffset++];
+
+ if (tag == LineEndTag) {
+ if (dataByte != LineEndTag)
+ return BadLineEnd;
+
+ if (numLines_ && ++currentLine == numLines_)
+ return MissingRegs;
+
+ if (lineLengthBytes_) {
+ currentOffset = currentLineStart + lineLengthBytes_;
+
+ /* Require whole line to be in the buffer (if buffer size set). */
+ if (buffer.size() &&
+ currentOffset + lineLengthBytes_ > buffer.size())
+ return MissingRegs;
+
+ if (buffer[currentOffset] != LineStart)
+ return NoLineStart;
+ } else {
+ /* allow a zero line length to mean "hunt for the next line" */
+ while (currentOffset < buffer.size() &&
+ buffer[currentOffset] != LineStart)
+ currentOffset++;
+
+ if (currentOffset == buffer.size())
+ return NoLineStart;
+ }
+
+ /* inc currentOffset to after LineStart */
+ currentLineStart = currentOffset++;
+ } else {
+ if (tag == RegHiBits)
+ regNum = (regNum & 0xff) | (dataByte << 8);
+ else if (tag == RegLowBits)
+ regNum = (regNum & 0xff00) | dataByte;
+ else if (tag == RegSkip)
+ regNum++;
+ else if (tag == RegValue) {
+ auto reg = offsets_.find(regNum);
+
+ if (reg != offsets_.end()) {
+ offsets_[regNum] = currentOffset - 1;
+
+ if (++regsDone == offsets_.size())
+ return ParseOk;
+ }
+ regNum++;
+ } else
+ return IllegalTag;
+ }
+ }
+}
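
As a concrete illustration of the tag stream findRegs() walks, a single (simplified) embedded-data line carrying two registers looks like this; the packing dummy bytes that the bitsPerPixel_ checks above skip over are omitted, so the offsets are only indicative:

#include <cstdint>

/* Simplified SMIA embedded-data line for registers 0x0160 and 0x0161 (sketch only). */
static const uint8_t line[] = {
	0x0a,        /* LineStart */
	0xaa, 0x01,  /* RegHiBits: register address high byte -> regNum = 0x01xx */
	0xa5, 0x60,  /* RegLowBits: register address low byte  -> regNum = 0x0160 */
	0x5a, 0x2b,  /* RegValue: offset of this data byte recorded for 0x0160, then regNum++ */
	0x5a, 0x40,  /* RegValue: offset of this data byte recorded for 0x0161 */
	0x07, 0x07,  /* LineEndTag pair terminates the line */
};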
diff --git a/src/ipa/rpi/cam_helper/meson.build b/src/ipa/rpi/cam_helper/meson.build
new file mode 100644
index 00000000..72625057
--- /dev/null
+++ b/src/ipa/rpi/cam_helper/meson.build
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: CC0-1.0
+
+rpi_ipa_cam_helper_sources = files([
+ 'cam_helper.cpp',
+ 'cam_helper_ov5647.cpp',
+ 'cam_helper_imx219.cpp',
+ 'cam_helper_imx290.cpp',
+ 'cam_helper_imx296.cpp',
+ 'cam_helper_imx477.cpp',
+ 'cam_helper_imx519.cpp',
+ 'cam_helper_imx708.cpp',
+ 'cam_helper_ov64a40.cpp',
+ 'cam_helper_ov9281.cpp',
+ 'md_parser_smia.cpp',
+])
+
+rpi_ipa_cam_helper_includes = [
+ include_directories('..'),
+]
+
+rpi_ipa_cam_helper_deps = [
+ libcamera_private,
+]
+
+rpi_ipa_cam_helper_lib = static_library('rpi_ipa_cam_helper', rpi_ipa_cam_helper_sources,
+ include_directories : rpi_ipa_cam_helper_includes,
+ dependencies : rpi_ipa_cam_helper_deps)
diff --git a/src/ipa/rpi/common/ipa_base.cpp b/src/ipa/rpi/common/ipa_base.cpp
new file mode 100644
index 00000000..61490bd6
--- /dev/null
+++ b/src/ipa/rpi/common/ipa_base.cpp
@@ -0,0 +1,1514 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019-2023, Raspberry Pi Ltd
+ *
+ * Raspberry Pi IPA base class
+ */
+
+#include "ipa_base.h"
+
+#include <cmath>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/span.h>
+#include <libcamera/control_ids.h>
+#include <libcamera/property_ids.h>
+
+#include "controller/af_algorithm.h"
+#include "controller/af_status.h"
+#include "controller/agc_algorithm.h"
+#include "controller/awb_algorithm.h"
+#include "controller/awb_status.h"
+#include "controller/black_level_status.h"
+#include "controller/ccm_algorithm.h"
+#include "controller/ccm_status.h"
+#include "controller/contrast_algorithm.h"
+#include "controller/denoise_algorithm.h"
+#include "controller/hdr_algorithm.h"
+#include "controller/lux_status.h"
+#include "controller/sharpen_algorithm.h"
+#include "controller/statistics.h"
+
+namespace libcamera {
+
+using namespace std::literals::chrono_literals;
+using utils::Duration;
+
+namespace {
+
+/* Number of frame length times to hold in the queue. */
+constexpr unsigned int FrameLengthsQueueSize = 10;
+
+/* Configure the sensor with these values initially. */
+constexpr double defaultAnalogueGain = 1.0;
+constexpr Duration defaultExposureTime = 20.0ms;
+constexpr Duration defaultMinFrameDuration = 1.0s / 30.0;
+constexpr Duration defaultMaxFrameDuration = 250.0s;
+
+/*
+ * Determine the minimum allowable inter-frame duration to run the controller
+ * algorithms. If the pipeline handler provides frames at a rate higher than this,
+ * we rate-limit the controller Prepare() and Process() calls to lower than or
+ * equal to this rate.
+ */
+constexpr Duration controllerMinFrameDuration = 1.0s / 30.0;
+
+/* List of controls handled by the Raspberry Pi IPA */
+const ControlInfoMap::Map ipaControls{
+ { &controls::AeEnable, ControlInfo(false, true) },
+ { &controls::ExposureTime, ControlInfo(0, 66666) },
+ { &controls::AnalogueGain, ControlInfo(1.0f, 16.0f) },
+ { &controls::AeMeteringMode, ControlInfo(controls::AeMeteringModeValues) },
+ { &controls::AeConstraintMode, ControlInfo(controls::AeConstraintModeValues) },
+ { &controls::AeExposureMode, ControlInfo(controls::AeExposureModeValues) },
+ { &controls::ExposureValue, ControlInfo(-8.0f, 8.0f, 0.0f) },
+ { &controls::AeFlickerMode, ControlInfo(static_cast<int>(controls::FlickerOff),
+ static_cast<int>(controls::FlickerManual),
+ static_cast<int>(controls::FlickerOff)) },
+ { &controls::AeFlickerPeriod, ControlInfo(100, 1000000) },
+ { &controls::Brightness, ControlInfo(-1.0f, 1.0f, 0.0f) },
+ { &controls::Contrast, ControlInfo(0.0f, 32.0f, 1.0f) },
+ { &controls::HdrMode, ControlInfo(controls::HdrModeValues) },
+ { &controls::Sharpness, ControlInfo(0.0f, 16.0f, 1.0f) },
+ { &controls::ScalerCrop, ControlInfo(Rectangle{}, Rectangle(65535, 65535, 65535, 65535), Rectangle{}) },
+ { &controls::FrameDurationLimits, ControlInfo(INT64_C(33333), INT64_C(120000)) },
+ { &controls::draft::NoiseReductionMode, ControlInfo(controls::draft::NoiseReductionModeValues) },
+ { &controls::rpi::StatsOutputEnable, ControlInfo(false, true) },
+};
+
+/* IPA controls handled conditionally, if the sensor is not mono */
+const ControlInfoMap::Map ipaColourControls{
+ { &controls::AwbEnable, ControlInfo(false, true) },
+ { &controls::AwbMode, ControlInfo(controls::AwbModeValues) },
+ { &controls::ColourGains, ControlInfo(0.0f, 32.0f) },
+ { &controls::Saturation, ControlInfo(0.0f, 32.0f, 1.0f) },
+};
+
+/* IPA controls handled conditionally, if the lens has a focus control */
+const ControlInfoMap::Map ipaAfControls{
+ { &controls::AfMode, ControlInfo(controls::AfModeValues) },
+ { &controls::AfRange, ControlInfo(controls::AfRangeValues) },
+ { &controls::AfSpeed, ControlInfo(controls::AfSpeedValues) },
+ { &controls::AfMetering, ControlInfo(controls::AfMeteringValues) },
+ { &controls::AfWindows, ControlInfo(Rectangle{}, Rectangle(65535, 65535, 65535, 65535), Rectangle{}) },
+ { &controls::AfTrigger, ControlInfo(controls::AfTriggerValues) },
+ { &controls::AfPause, ControlInfo(controls::AfPauseValues) },
+ { &controls::LensPosition, ControlInfo(0.0f, 32.0f, 1.0f) }
+};
+
+} /* namespace */
+
+LOG_DEFINE_CATEGORY(IPARPI)
+
+namespace ipa::RPi {
+
+IpaBase::IpaBase()
+ : controller_(), frameLengths_(FrameLengthsQueueSize, 0s), stitchSwapBuffers_(false), frameCount_(0),
+ mistrustCount_(0), lastRunTimestamp_(0), firstStart_(true), flickerState_({ 0, 0s })
+{
+}
+
+IpaBase::~IpaBase()
+{
+}
+
+int32_t IpaBase::init(const IPASettings &settings, const InitParams &params, InitResult *result)
+{
+ /*
+ * Load the "helper" for this sensor. This tells us all the device specific stuff
+ * that the kernel driver doesn't. We only do this the first time; we don't need
+ * to re-parse the metadata after a simple mode-switch for no reason.
+ */
+ helper_ = std::unique_ptr<RPiController::CamHelper>(RPiController::CamHelper::create(settings.sensorModel));
+ if (!helper_) {
+ LOG(IPARPI, Error) << "Could not create camera helper for "
+ << settings.sensorModel;
+ return -EINVAL;
+ }
+
+ /*
+ * Pass out the sensor config to the pipeline handler in order
+ * to setup the staggered writer class.
+ */
+ int gainDelay, exposureDelay, vblankDelay, hblankDelay, sensorMetadata;
+ helper_->getDelays(exposureDelay, gainDelay, vblankDelay, hblankDelay);
+ sensorMetadata = helper_->sensorEmbeddedDataPresent();
+
+ result->sensorConfig.gainDelay = gainDelay;
+ result->sensorConfig.exposureDelay = exposureDelay;
+ result->sensorConfig.vblankDelay = vblankDelay;
+ result->sensorConfig.hblankDelay = hblankDelay;
+ result->sensorConfig.sensorMetadata = sensorMetadata;
+
+ /* Load the tuning file for this sensor. */
+ int ret = controller_.read(settings.configurationFile.c_str());
+ if (ret) {
+ LOG(IPARPI, Error)
+ << "Failed to load tuning data file "
+ << settings.configurationFile;
+ return ret;
+ }
+
+ lensPresent_ = params.lensPresent;
+
+ controller_.initialise();
+
+ /* Return the controls handled by the IPA */
+ ControlInfoMap::Map ctrlMap = ipaControls;
+ if (lensPresent_)
+ ctrlMap.merge(ControlInfoMap::Map(ipaAfControls));
+
+ monoSensor_ = params.sensorInfo.cfaPattern == properties::draft::ColorFilterArrangementEnum::MONO;
+ if (!monoSensor_)
+ ctrlMap.merge(ControlInfoMap::Map(ipaColourControls));
+
+ result->controlInfo = ControlInfoMap(std::move(ctrlMap), controls::controls);
+
+ return platformInit(params, result);
+}
+
+int32_t IpaBase::configure(const IPACameraSensorInfo &sensorInfo, const ConfigParams &params,
+ ConfigResult *result)
+{
+ sensorCtrls_ = params.sensorControls;
+
+ if (!validateSensorControls()) {
+ LOG(IPARPI, Error) << "Sensor control validation failed.";
+ return -1;
+ }
+
+ if (lensPresent_) {
+ lensCtrls_ = params.lensControls;
+ if (!validateLensControls()) {
+ LOG(IPARPI, Warning) << "Lens validation failed, "
+ << "no lens control will be available.";
+ lensPresent_ = false;
+ }
+ }
+
+ /* Setup a metadata ControlList to output metadata. */
+ libcameraMetadata_ = ControlList(controls::controls);
+
+ /* Re-assemble camera mode using the sensor info. */
+ setMode(sensorInfo);
+
+ mode_.transform = static_cast<libcamera::Transform>(params.transform);
+
+ /* Pass the camera mode to the CamHelper to setup algorithms. */
+ helper_->setCameraMode(mode_);
+
+ /*
+ * Initialise this ControlList correctly, even if empty, in case the IPA is
+ * running in isolation mode (passing the ControlList through the IPC layer).
+ */
+ ControlList ctrls(sensorCtrls_);
+
+ /* The pipeline handler passes out the mode's sensitivity. */
+ result->modeSensitivity = mode_.sensitivity;
+
+ if (firstStart_) {
+ /* Supply initial values for frame durations. */
+ applyFrameDurations(defaultMinFrameDuration, defaultMaxFrameDuration);
+
+ /* Supply initial values for gain and exposure. */
+ AgcStatus agcStatus;
+ agcStatus.shutterTime = defaultExposureTime;
+ agcStatus.analogueGain = defaultAnalogueGain;
+ applyAGC(&agcStatus, ctrls);
+
+ /*
+ * Set the lens to the default (typically hyperfocal) position
+ * on first start.
+ */
+ if (lensPresent_) {
+ RPiController::AfAlgorithm *af =
+ dynamic_cast<RPiController::AfAlgorithm *>(controller_.getAlgorithm("af"));
+
+ if (af) {
+ float defaultPos =
+ ipaAfControls.at(&controls::LensPosition).def().get<float>();
+ ControlList lensCtrl(lensCtrls_);
+ int32_t hwpos;
+
+ af->setLensPosition(defaultPos, &hwpos);
+ lensCtrl.set(V4L2_CID_FOCUS_ABSOLUTE, hwpos);
+ result->lensControls = std::move(lensCtrl);
+ }
+ }
+ }
+
+ result->sensorControls = std::move(ctrls);
+
+ /*
+ * Apply the correct limits to the exposure, gain and frame duration controls
+ * based on the current sensor mode.
+ */
+ ControlInfoMap::Map ctrlMap = ipaControls;
+ ctrlMap[&controls::FrameDurationLimits] =
+ ControlInfo(static_cast<int64_t>(mode_.minFrameDuration.get<std::micro>()),
+ static_cast<int64_t>(mode_.maxFrameDuration.get<std::micro>()));
+
+ ctrlMap[&controls::AnalogueGain] =
+ ControlInfo(static_cast<float>(mode_.minAnalogueGain),
+ static_cast<float>(mode_.maxAnalogueGain));
+
+ ctrlMap[&controls::ExposureTime] =
+ ControlInfo(static_cast<int32_t>(mode_.minShutter.get<std::micro>()),
+ static_cast<int32_t>(mode_.maxShutter.get<std::micro>()));
+
+ /* Declare colour processing related controls for non-mono sensors. */
+ if (!monoSensor_)
+ ctrlMap.merge(ControlInfoMap::Map(ipaColourControls));
+
+ /* Declare Autofocus controls, only if we have a controllable lens */
+ if (lensPresent_)
+ ctrlMap.merge(ControlInfoMap::Map(ipaAfControls));
+
+ result->controlInfo = ControlInfoMap(std::move(ctrlMap), controls::controls);
+
+ return platformConfigure(params, result);
+}
+
+void IpaBase::start(const ControlList &controls, StartResult *result)
+{
+ RPiController::Metadata metadata;
+
+ if (!controls.empty()) {
+ /* We have been given some controls to action before start. */
+ applyControls(controls);
+ }
+
+ controller_.switchMode(mode_, &metadata);
+
+ /* Reset the frame lengths queue state. */
+ lastTimeout_ = 0s;
+ frameLengths_.clear();
+ frameLengths_.resize(FrameLengthsQueueSize, 0s);
+
+ /* SwitchMode may supply updated exposure/gain values to use. */
+ AgcStatus agcStatus;
+ agcStatus.shutterTime = 0.0s;
+ agcStatus.analogueGain = 0.0;
+
+ metadata.get("agc.status", agcStatus);
+ if (agcStatus.shutterTime && agcStatus.analogueGain) {
+ ControlList ctrls(sensorCtrls_);
+ applyAGC(&agcStatus, ctrls);
+ result->controls = std::move(ctrls);
+ setCameraTimeoutValue();
+ }
+ /* Make a note of this as it tells us the HDR status of the first few frames. */
+ hdrStatus_ = agcStatus.hdr;
+
+ /*
+ * Initialise frame counts, and decide how many frames must be hidden or
+ * "mistrusted", which depends on whether this is a startup from cold,
+ * or merely a mode switch in a running system.
+ */
+ frameCount_ = 0;
+ if (firstStart_) {
+ dropFrameCount_ = helper_->hideFramesStartup();
+ mistrustCount_ = helper_->mistrustFramesStartup();
+
+ /*
+ * Query the AGC/AWB for how many frames they may take to
+ * converge sufficiently. Where these numbers are non-zero
+ * we must allow for the frames with bad statistics
+ * (mistrustCount_) that they won't see. But if zero (i.e.
+ * no convergence necessary), no frames need to be dropped.
+ */
+ unsigned int agcConvergenceFrames = 0;
+ RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>(
+ controller_.getAlgorithm("agc"));
+ if (agc) {
+ agcConvergenceFrames = agc->getConvergenceFrames();
+ if (agcConvergenceFrames)
+ agcConvergenceFrames += mistrustCount_;
+ }
+
+ unsigned int awbConvergenceFrames = 0;
+ RPiController::AwbAlgorithm *awb = dynamic_cast<RPiController::AwbAlgorithm *>(
+ controller_.getAlgorithm("awb"));
+ if (awb) {
+ awbConvergenceFrames = awb->getConvergenceFrames();
+ if (awbConvergenceFrames)
+ awbConvergenceFrames += mistrustCount_;
+ }
+
+ dropFrameCount_ = std::max({ dropFrameCount_, agcConvergenceFrames, awbConvergenceFrames });
+ LOG(IPARPI, Debug) << "Drop " << dropFrameCount_ << " frames on startup";
+ } else {
+ dropFrameCount_ = helper_->hideFramesModeSwitch();
+ mistrustCount_ = helper_->mistrustFramesModeSwitch();
+ }
+
+ result->dropFrameCount = dropFrameCount_;
+
+ firstStart_ = false;
+ lastRunTimestamp_ = 0;
+
+ platformStart(controls, result);
+}
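
To put numbers on the startup logic: with hypothetical values of hideFramesStartup() = 6 and mistrustFramesStartup() = 2, an AGC reporting 4 convergence frames contributes 4 + 2 = 6, so dropFrameCount_ = max(6, 6, awb term). The real figures come from the CamHelper and the algorithms' tuning, not from this patch.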
+
+void IpaBase::mapBuffers(const std::vector<IPABuffer> &buffers)
+{
+ for (const IPABuffer &buffer : buffers) {
+ const FrameBuffer fb(buffer.planes);
+ buffers_.emplace(buffer.id,
+ MappedFrameBuffer(&fb, MappedFrameBuffer::MapFlag::ReadWrite));
+ }
+}
+
+void IpaBase::unmapBuffers(const std::vector<unsigned int> &ids)
+{
+ for (unsigned int id : ids) {
+ auto it = buffers_.find(id);
+ if (it == buffers_.end())
+ continue;
+
+ buffers_.erase(id);
+ }
+}
+
+void IpaBase::prepareIsp(const PrepareParams &params)
+{
+ applyControls(params.requestControls);
+
+ /*
+ * At start-up, or after a mode-switch, we may want to
+ * avoid running the control algos for a few frames in case
+ * they are "unreliable".
+ */
+ int64_t frameTimestamp = params.sensorControls.get(controls::SensorTimestamp).value_or(0);
+ unsigned int ipaContext = params.ipaContext % rpiMetadata_.size();
+ RPiController::Metadata &rpiMetadata = rpiMetadata_[ipaContext];
+ Span<uint8_t> embeddedBuffer;
+
+ rpiMetadata.clear();
+ fillDeviceStatus(params.sensorControls, ipaContext);
+
+ if (params.buffers.embedded) {
+ /*
+ * Pipeline handler has supplied us with an embedded data buffer,
+ * we must pass it to the CamHelper for parsing.
+ */
+ auto it = buffers_.find(params.buffers.embedded);
+ ASSERT(it != buffers_.end());
+ embeddedBuffer = it->second.planes()[0];
+ }
+
+ /*
+ * AGC wants to know the algorithm status from the time it actioned the
+ * sensor exposure/gain changes. So fetch it from the metadata list
+ * indexed by the IPA cookie returned, and put it in the current frame
+ * metadata.
+ *
+ * Note if the HDR mode has changed, as things like tonemaps may need updating.
+ */
+ AgcStatus agcStatus;
+ bool hdrChange = false;
+ RPiController::Metadata &delayedMetadata = rpiMetadata_[params.delayContext];
+ if (!delayedMetadata.get<AgcStatus>("agc.status", agcStatus)) {
+ rpiMetadata.set("agc.delayed_status", agcStatus);
+ hdrChange = agcStatus.hdr.mode != hdrStatus_.mode;
+ hdrStatus_ = agcStatus.hdr;
+ }
+
+ /*
+ * This may overwrite the DeviceStatus using values from the sensor
+ * metadata, and may also do additional custom processing.
+ */
+ helper_->prepare(embeddedBuffer, rpiMetadata);
+
+ /* Allow a 10% margin on the comparison below. */
+ Duration delta = (frameTimestamp - lastRunTimestamp_) * 1.0ns;
+ if (lastRunTimestamp_ && frameCount_ > dropFrameCount_ &&
+ delta < controllerMinFrameDuration * 0.9 && !hdrChange) {
+ /*
+ * Ensure we merge the previous frame's metadata with the current
+ * frame. This will not overwrite exposure/gain values for the
+ * current frame, or any other bits of metadata that were added
+ * in helper_->prepare().
+ */
+ RPiController::Metadata &lastMetadata =
+ rpiMetadata_[(ipaContext ? ipaContext : rpiMetadata_.size()) - 1];
+ rpiMetadata.mergeCopy(lastMetadata);
+ processPending_ = false;
+ } else {
+ processPending_ = true;
+ lastRunTimestamp_ = frameTimestamp;
+ }
+
+ /*
+ * If the statistics are inline (i.e. already available with the Bayer
+ * frame), call processStats() now before prepare().
+ */
+ if (controller_.getHardwareConfig().statsInline)
+ processStats({ params.buffers, params.ipaContext });
+
+ /* Do we need/want to call prepare? */
+ if (processPending_) {
+ controller_.prepare(&rpiMetadata);
+ /* Actually prepare the ISP parameters for the frame. */
+ platformPrepareIsp(params, rpiMetadata);
+ }
+
+ frameCount_++;
+
+ /* If the statistics are inline the metadata can be returned early. */
+ if (controller_.getHardwareConfig().statsInline)
+ reportMetadata(ipaContext);
+
+ /* Ready to push the input buffer into the ISP. */
+ prepareIspComplete.emit(params.buffers, stitchSwapBuffers_);
+}
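
In terms of the constants above, controllerMinFrameDuration is 1/30 s, so with the 10% margin any frame arriving less than roughly 30 ms after the last processed one reuses the previous frame's metadata instead of re-running the algorithms. A 60 fps feed (frames 16.7 ms apart) therefore runs Prepare()/Process() on roughly every other frame, while feeds at 30 fps or slower run them on every frame.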
+
+void IpaBase::processStats(const ProcessParams &params)
+{
+ unsigned int ipaContext = params.ipaContext % rpiMetadata_.size();
+
+ if (processPending_ && frameCount_ >= mistrustCount_) {
+ RPiController::Metadata &rpiMetadata = rpiMetadata_[ipaContext];
+
+ auto it = buffers_.find(params.buffers.stats);
+ if (it == buffers_.end()) {
+ LOG(IPARPI, Error) << "Could not find stats buffer!";
+ return;
+ }
+
+ RPiController::StatisticsPtr statistics = platformProcessStats(it->second.planes()[0]);
+
+ /* reportMetadata() will pick this up and set the FocusFoM metadata */
+ rpiMetadata.set("focus.status", statistics->focusRegions);
+
+ helper_->process(statistics, rpiMetadata);
+ controller_.process(statistics, &rpiMetadata);
+
+ struct AgcStatus agcStatus;
+ if (rpiMetadata.get("agc.status", agcStatus) == 0) {
+ ControlList ctrls(sensorCtrls_);
+ applyAGC(&agcStatus, ctrls);
+ setDelayedControls.emit(ctrls, ipaContext);
+ setCameraTimeoutValue();
+ }
+ }
+
+ /*
+ * If the statistics are not inline the metadata must be returned now,
+ * before the processStatsComplete signal.
+ */
+ if (!controller_.getHardwareConfig().statsInline)
+ reportMetadata(ipaContext);
+
+ processStatsComplete.emit(params.buffers);
+}
+
+void IpaBase::setMode(const IPACameraSensorInfo &sensorInfo)
+{
+ mode_.bitdepth = sensorInfo.bitsPerPixel;
+ mode_.width = sensorInfo.outputSize.width;
+ mode_.height = sensorInfo.outputSize.height;
+ mode_.sensorWidth = sensorInfo.activeAreaSize.width;
+ mode_.sensorHeight = sensorInfo.activeAreaSize.height;
+ mode_.cropX = sensorInfo.analogCrop.x;
+ mode_.cropY = sensorInfo.analogCrop.y;
+ mode_.pixelRate = sensorInfo.pixelRate;
+
+ /*
+ * Calculate scaling parameters. The scale_[xy] factors are determined
+ * by the ratio between the crop rectangle size and the output size.
+ */
+ mode_.scaleX = sensorInfo.analogCrop.width / sensorInfo.outputSize.width;
+ mode_.scaleY = sensorInfo.analogCrop.height / sensorInfo.outputSize.height;
+
+ /*
+ * We're not told by the pipeline handler how scaling is split between
+ * binning and digital scaling. For now, as a heuristic, assume that
+ * downscaling up to 2 is achieved through binning, and that any
+ * additional scaling is achieved through digital scaling.
+ *
+ * \todo Get the pipeline handler to provide the full data
+ */
+ mode_.binX = std::min(2, static_cast<int>(mode_.scaleX));
+ mode_.binY = std::min(2, static_cast<int>(mode_.scaleY));
+
+ /* The noise factor is the square root of the total binning factor. */
+ mode_.noiseFactor = std::sqrt(mode_.binX * mode_.binY);
+
+ /*
+ * Calculate the line length as the ratio between the line length in
+ * pixels and the pixel rate.
+ */
+ mode_.minLineLength = sensorInfo.minLineLength * (1.0s / sensorInfo.pixelRate);
+ mode_.maxLineLength = sensorInfo.maxLineLength * (1.0s / sensorInfo.pixelRate);
+
+ /*
+ * Ensure that the maximum pixel processing rate does not exceed the ISP
+ * hardware capabilities. If it does, try adjusting the minimum line
+ * length to compensate if possible.
+ */
+ Duration minPixelTime = controller_.getHardwareConfig().minPixelProcessingTime;
+ Duration pixelTime = mode_.minLineLength / mode_.width;
+ if (minPixelTime && pixelTime < minPixelTime) {
+ Duration adjustedLineLength = minPixelTime * mode_.width;
+ if (adjustedLineLength <= mode_.maxLineLength) {
+ LOG(IPARPI, Info)
+ << "Adjusting mode minimum line length from " << mode_.minLineLength
+ << " to " << adjustedLineLength << " because of ISP constraints.";
+ mode_.minLineLength = adjustedLineLength;
+ } else {
+ LOG(IPARPI, Error)
+ << "Sensor minimum line length of " << pixelTime * mode_.width
+ << " (" << 1us / pixelTime << " MPix/s)"
+ << " is below the minimum allowable ISP limit of "
+ << adjustedLineLength
+ << " (" << 1us / minPixelTime << " MPix/s) ";
+ LOG(IPARPI, Error)
+ << "THIS WILL CAUSE IMAGE CORRUPTION!!! "
+ << "Please update the camera sensor driver to allow more horizontal blanking control.";
+ }
+ }
+
+ /*
+ * Set the frame length limits for the mode to ensure exposure and
+ * framerate calculations are clipped appropriately.
+ */
+ mode_.minFrameLength = sensorInfo.minFrameLength;
+ mode_.maxFrameLength = sensorInfo.maxFrameLength;
+
+ /* Store these for convenience. */
+ mode_.minFrameDuration = mode_.minFrameLength * mode_.minLineLength;
+ mode_.maxFrameDuration = mode_.maxFrameLength * mode_.maxLineLength;
+
+ /*
+ * Some sensors may have different sensitivities in different modes;
+ * the CamHelper will know the correct value.
+ */
+ mode_.sensitivity = helper_->getModeSensitivity(mode_);
+
+ const ControlInfo &gainCtrl = sensorCtrls_.at(V4L2_CID_ANALOGUE_GAIN);
+ const ControlInfo &shutterCtrl = sensorCtrls_.at(V4L2_CID_EXPOSURE);
+
+ mode_.minAnalogueGain = helper_->gain(gainCtrl.min().get<int32_t>());
+ mode_.maxAnalogueGain = helper_->gain(gainCtrl.max().get<int32_t>());
+
+ /*
+ * We need to give the helper the min/max frame durations so it can calculate
+ * the correct exposure limits below.
+ */
+ helper_->setCameraMode(mode_);
+
+ /* Shutter speed is calculated based on the limits of the frame durations. */
+ mode_.minShutter = helper_->exposure(shutterCtrl.min().get<int32_t>(), mode_.minLineLength);
+ mode_.maxShutter = Duration::max();
+ helper_->getBlanking(mode_.maxShutter,
+ mode_.minFrameDuration, mode_.maxFrameDuration);
+}
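
As a worked (and purely hypothetical) instance of the heuristic above: a 4056-pixel-wide analogue crop output at 1014 pixels gives scaleX = 4, binning is capped at 2 and the remaining factor is treated as digital scaling, so with the same vertical figures the noise factor is sqrt(2 * 2) = 2. A standalone sketch of that arithmetic:

#include <algorithm>
#include <cmath>
#include <iostream>

int main()
{
	/* Hypothetical mode: 4056-wide analogue crop downscaled to a 1014-wide output. */
	double scaleX = 4056.0 / 1014.0;                      /* 4.0 */
	int binX = std::min(2, static_cast<int>(scaleX));     /* binning capped at 2x */
	int binY = binX;                                      /* assume the same vertically */
	double noiseFactor = std::sqrt(binX * binY);          /* 2.0 */
	std::cout << scaleX << " " << binX << " " << noiseFactor << "\n";
	return 0;
}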
+
+void IpaBase::setCameraTimeoutValue()
+{
+ /*
+ * Take the maximum value of the exposure queue as the camera timeout
+ * value to pass back to the pipeline handler. Only signal if it has changed
+ * from the last set value.
+ */
+ auto max = std::max_element(frameLengths_.begin(), frameLengths_.end());
+
+ if (*max != lastTimeout_) {
+ setCameraTimeout.emit(max->get<std::milli>());
+ lastTimeout_ = *max;
+ }
+}
+
+bool IpaBase::validateSensorControls()
+{
+ static const uint32_t ctrls[] = {
+ V4L2_CID_ANALOGUE_GAIN,
+ V4L2_CID_EXPOSURE,
+ V4L2_CID_VBLANK,
+ V4L2_CID_HBLANK,
+ };
+
+ for (auto c : ctrls) {
+ if (sensorCtrls_.find(c) == sensorCtrls_.end()) {
+ LOG(IPARPI, Error) << "Unable to find sensor control "
+ << utils::hex(c);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool IpaBase::validateLensControls()
+{
+ if (lensCtrls_.find(V4L2_CID_FOCUS_ABSOLUTE) == lensCtrls_.end()) {
+ LOG(IPARPI, Error) << "Unable to find Lens control V4L2_CID_FOCUS_ABSOLUTE";
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Converting between enums (used in the libcamera API) and the names that
+ * we use to identify different modes. Unfortunately, the conversion tables
+ * must be kept up-to-date by hand.
+ */
+static const std::map<int32_t, std::string> MeteringModeTable = {
+ { controls::MeteringCentreWeighted, "centre-weighted" },
+ { controls::MeteringSpot, "spot" },
+ { controls::MeteringMatrix, "matrix" },
+ { controls::MeteringCustom, "custom" },
+};
+
+static const std::map<int32_t, std::string> ConstraintModeTable = {
+ { controls::ConstraintNormal, "normal" },
+ { controls::ConstraintHighlight, "highlight" },
+ { controls::ConstraintShadows, "shadows" },
+ { controls::ConstraintCustom, "custom" },
+};
+
+static const std::map<int32_t, std::string> ExposureModeTable = {
+ { controls::ExposureNormal, "normal" },
+ { controls::ExposureShort, "short" },
+ { controls::ExposureLong, "long" },
+ { controls::ExposureCustom, "custom" },
+};
+
+static const std::map<int32_t, std::string> AwbModeTable = {
+ { controls::AwbAuto, "auto" },
+ { controls::AwbIncandescent, "incandescent" },
+ { controls::AwbTungsten, "tungsten" },
+ { controls::AwbFluorescent, "fluorescent" },
+ { controls::AwbIndoor, "indoor" },
+ { controls::AwbDaylight, "daylight" },
+ { controls::AwbCloudy, "cloudy" },
+ { controls::AwbCustom, "custom" },
+};
+
+static const std::map<int32_t, RPiController::AfAlgorithm::AfMode> AfModeTable = {
+ { controls::AfModeManual, RPiController::AfAlgorithm::AfModeManual },
+ { controls::AfModeAuto, RPiController::AfAlgorithm::AfModeAuto },
+ { controls::AfModeContinuous, RPiController::AfAlgorithm::AfModeContinuous },
+};
+
+static const std::map<int32_t, RPiController::AfAlgorithm::AfRange> AfRangeTable = {
+ { controls::AfRangeNormal, RPiController::AfAlgorithm::AfRangeNormal },
+ { controls::AfRangeMacro, RPiController::AfAlgorithm::AfRangeMacro },
+ { controls::AfRangeFull, RPiController::AfAlgorithm::AfRangeFull },
+};
+
+static const std::map<int32_t, RPiController::AfAlgorithm::AfPause> AfPauseTable = {
+ { controls::AfPauseImmediate, RPiController::AfAlgorithm::AfPauseImmediate },
+ { controls::AfPauseDeferred, RPiController::AfAlgorithm::AfPauseDeferred },
+ { controls::AfPauseResume, RPiController::AfAlgorithm::AfPauseResume },
+};
+
+static const std::map<int32_t, std::string> HdrModeTable = {
+ { controls::HdrModeOff, "Off" },
+ { controls::HdrModeMultiExposureUnmerged, "MultiExposureUnmerged" },
+ { controls::HdrModeMultiExposure, "MultiExposure" },
+ { controls::HdrModeSingleExposure, "SingleExposure" },
+ { controls::HdrModeNight, "Night" },
+};
+
+void IpaBase::applyControls(const ControlList &controls)
+{
+ using RPiController::AgcAlgorithm;
+ using RPiController::AfAlgorithm;
+ using RPiController::ContrastAlgorithm;
+ using RPiController::DenoiseAlgorithm;
+ using RPiController::HdrAlgorithm;
+
+ /* Clear the return metadata buffer. */
+ libcameraMetadata_.clear();
+
+ /* Because some AF controls are mode-specific, handle AF mode change first. */
+ if (controls.contains(controls::AF_MODE)) {
+ AfAlgorithm *af = dynamic_cast<AfAlgorithm *>(controller_.getAlgorithm("af"));
+ if (!af) {
+ LOG(IPARPI, Warning)
+ << "Could not set AF_MODE - no AF algorithm";
+ }
+
+ int32_t idx = controls.get(controls::AF_MODE).get<int32_t>();
+ auto mode = AfModeTable.find(idx);
+ if (mode == AfModeTable.end()) {
+ LOG(IPARPI, Error) << "AF mode " << idx
+ << " not recognised";
+ } else if (af)
+ af->setMode(mode->second);
+ }
+
+ /* Iterate over controls */
+ for (auto const &ctrl : controls) {
+ LOG(IPARPI, Debug) << "Request ctrl: "
+ << controls::controls.at(ctrl.first)->name()
+ << " = " << ctrl.second.toString();
+
+ switch (ctrl.first) {
+ case controls::AE_ENABLE: {
+ RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>(
+ controller_.getAlgorithm("agc"));
+ if (!agc) {
+ LOG(IPARPI, Warning)
+ << "Could not set AE_ENABLE - no AGC algorithm";
+ break;
+ }
+
+ if (ctrl.second.get<bool>() == false)
+ agc->disableAuto();
+ else
+ agc->enableAuto();
+
+ libcameraMetadata_.set(controls::AeEnable, ctrl.second.get<bool>());
+ break;
+ }
+
+ case controls::EXPOSURE_TIME: {
+ RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>(
+ controller_.getAlgorithm("agc"));
+ if (!agc) {
+ LOG(IPARPI, Warning)
+ << "Could not set EXPOSURE_TIME - no AGC algorithm";
+ break;
+ }
+
+ /* The control provides units of microseconds. */
+ agc->setFixedShutter(0, ctrl.second.get<int32_t>() * 1.0us);
+
+ libcameraMetadata_.set(controls::ExposureTime, ctrl.second.get<int32_t>());
+ break;
+ }
+
+ case controls::ANALOGUE_GAIN: {
+ RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>(
+ controller_.getAlgorithm("agc"));
+ if (!agc) {
+ LOG(IPARPI, Warning)
+ << "Could not set ANALOGUE_GAIN - no AGC algorithm";
+ break;
+ }
+
+ agc->setFixedAnalogueGain(0, ctrl.second.get<float>());
+
+ libcameraMetadata_.set(controls::AnalogueGain,
+ ctrl.second.get<float>());
+ break;
+ }
+
+ case controls::AE_METERING_MODE: {
+ RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>(
+ controller_.getAlgorithm("agc"));
+ if (!agc) {
+ LOG(IPARPI, Warning)
+ << "Could not set AE_METERING_MODE - no AGC algorithm";
+ break;
+ }
+
+ int32_t idx = ctrl.second.get<int32_t>();
+ if (MeteringModeTable.count(idx)) {
+ agc->setMeteringMode(MeteringModeTable.at(idx));
+ libcameraMetadata_.set(controls::AeMeteringMode, idx);
+ } else {
+ LOG(IPARPI, Error) << "Metering mode " << idx
+ << " not recognised";
+ }
+ break;
+ }
+
+ case controls::AE_CONSTRAINT_MODE: {
+ RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>(
+ controller_.getAlgorithm("agc"));
+ if (!agc) {
+ LOG(IPARPI, Warning)
+ << "Could not set AE_CONSTRAINT_MODE - no AGC algorithm";
+ break;
+ }
+
+ int32_t idx = ctrl.second.get<int32_t>();
+ if (ConstraintModeTable.count(idx)) {
+ agc->setConstraintMode(ConstraintModeTable.at(idx));
+ libcameraMetadata_.set(controls::AeConstraintMode, idx);
+ } else {
+ LOG(IPARPI, Error) << "Constraint mode " << idx
+ << " not recognised";
+ }
+ break;
+ }
+
+ case controls::AE_EXPOSURE_MODE: {
+ RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>(
+ controller_.getAlgorithm("agc"));
+ if (!agc) {
+ LOG(IPARPI, Warning)
+ << "Could not set AE_EXPOSURE_MODE - no AGC algorithm";
+ break;
+ }
+
+ int32_t idx = ctrl.second.get<int32_t>();
+ if (ExposureModeTable.count(idx)) {
+ agc->setExposureMode(ExposureModeTable.at(idx));
+ libcameraMetadata_.set(controls::AeExposureMode, idx);
+ } else {
+ LOG(IPARPI, Error) << "Exposure mode " << idx
+ << " not recognised";
+ }
+ break;
+ }
+
+ case controls::EXPOSURE_VALUE: {
+ RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>(
+ controller_.getAlgorithm("agc"));
+ if (!agc) {
+ LOG(IPARPI, Warning)
+ << "Could not set EXPOSURE_VALUE - no AGC algorithm";
+ break;
+ }
+
+ /*
+ * The setEv() function takes in a direct exposure multiplier.
+ * So convert to 2^EV
+ */
+ double ev = pow(2.0, ctrl.second.get<float>());
+ agc->setEv(0, ev);
+ libcameraMetadata_.set(controls::ExposureValue,
+ ctrl.second.get<float>());
+ break;
+ }
+
+ case controls::AE_FLICKER_MODE: {
+ RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>(
+ controller_.getAlgorithm("agc"));
+ if (!agc) {
+ LOG(IPARPI, Warning)
+ << "Could not set AeFlickerMode - no AGC algorithm";
+ break;
+ }
+
+ int32_t mode = ctrl.second.get<int32_t>();
+ bool modeValid = true;
+
+ switch (mode) {
+ case controls::FlickerOff:
+ agc->setFlickerPeriod(0us);
+
+ break;
+
+ case controls::FlickerManual:
+ agc->setFlickerPeriod(flickerState_.manualPeriod);
+
+ break;
+
+ default:
+ LOG(IPARPI, Error) << "Flicker mode " << mode << " is not supported";
+ modeValid = false;
+
+ break;
+ }
+
+ if (modeValid)
+ flickerState_.mode = mode;
+
+ break;
+ }
+
+ case controls::AE_FLICKER_PERIOD: {
+ RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>(
+ controller_.getAlgorithm("agc"));
+ if (!agc) {
+ LOG(IPARPI, Warning)
+ << "Could not set AeFlickerPeriod - no AGC algorithm";
+ break;
+ }
+
+ uint32_t manualPeriod = ctrl.second.get<int32_t>();
+ flickerState_.manualPeriod = manualPeriod * 1.0us;
+
+ /*
+ * Note that it makes no difference whether the mode is set to "manual"
+ * first and the period updated after, or vice versa.
+ */
+ if (flickerState_.mode == controls::FlickerManual)
+ agc->setFlickerPeriod(flickerState_.manualPeriod);
+
+ break;
+ }
+
+ case controls::AWB_ENABLE: {
+ /* Silently ignore this control for a mono sensor. */
+ if (monoSensor_)
+ break;
+
+ RPiController::AwbAlgorithm *awb = dynamic_cast<RPiController::AwbAlgorithm *>(
+ controller_.getAlgorithm("awb"));
+ if (!awb) {
+ LOG(IPARPI, Warning)
+ << "Could not set AWB_ENABLE - no AWB algorithm";
+ break;
+ }
+
+ if (ctrl.second.get<bool>() == false)
+ awb->disableAuto();
+ else
+ awb->enableAuto();
+
+ libcameraMetadata_.set(controls::AwbEnable,
+ ctrl.second.get<bool>());
+ break;
+ }
+
+ case controls::AWB_MODE: {
+ /* Silently ignore this control for a mono sensor. */
+ if (monoSensor_)
+ break;
+
+ RPiController::AwbAlgorithm *awb = dynamic_cast<RPiController::AwbAlgorithm *>(
+ controller_.getAlgorithm("awb"));
+ if (!awb) {
+ LOG(IPARPI, Warning)
+ << "Could not set AWB_MODE - no AWB algorithm";
+ break;
+ }
+
+ int32_t idx = ctrl.second.get<int32_t>();
+ if (AwbModeTable.count(idx)) {
+ awb->setMode(AwbModeTable.at(idx));
+ libcameraMetadata_.set(controls::AwbMode, idx);
+ } else {
+ LOG(IPARPI, Error) << "AWB mode " << idx
+ << " not recognised";
+ }
+ break;
+ }
+
+ case controls::COLOUR_GAINS: {
+ /* Silently ignore this control for a mono sensor. */
+ if (monoSensor_)
+ break;
+
+ auto gains = ctrl.second.get<Span<const float>>();
+ RPiController::AwbAlgorithm *awb = dynamic_cast<RPiController::AwbAlgorithm *>(
+ controller_.getAlgorithm("awb"));
+ if (!awb) {
+ LOG(IPARPI, Warning)
+ << "Could not set COLOUR_GAINS - no AWB algorithm";
+ break;
+ }
+
+ awb->setManualGains(gains[0], gains[1]);
+ if (gains[0] != 0.0f && gains[1] != 0.0f)
+ /* A gain of 0.0f will switch back to auto mode. */
+ libcameraMetadata_.set(controls::ColourGains,
+ { gains[0], gains[1] });
+ break;
+ }
+
+ case controls::BRIGHTNESS: {
+ RPiController::ContrastAlgorithm *contrast = dynamic_cast<RPiController::ContrastAlgorithm *>(
+ controller_.getAlgorithm("contrast"));
+ if (!contrast) {
+ LOG(IPARPI, Warning)
+ << "Could not set BRIGHTNESS - no contrast algorithm";
+ break;
+ }
+
+ contrast->setBrightness(ctrl.second.get<float>() * 65536);
+ libcameraMetadata_.set(controls::Brightness,
+ ctrl.second.get<float>());
+ break;
+ }
+
+ case controls::CONTRAST: {
+ RPiController::ContrastAlgorithm *contrast = dynamic_cast<RPiController::ContrastAlgorithm *>(
+ controller_.getAlgorithm("contrast"));
+ if (!contrast) {
+ LOG(IPARPI, Warning)
+ << "Could not set CONTRAST - no contrast algorithm";
+ break;
+ }
+
+ contrast->setContrast(ctrl.second.get<float>());
+ libcameraMetadata_.set(controls::Contrast,
+ ctrl.second.get<float>());
+ break;
+ }
+
+ case controls::SATURATION: {
+ /* Silently ignore this control for a mono sensor. */
+ if (monoSensor_)
+ break;
+
+ RPiController::CcmAlgorithm *ccm = dynamic_cast<RPiController::CcmAlgorithm *>(
+ controller_.getAlgorithm("ccm"));
+ if (!ccm) {
+ LOG(IPARPI, Warning)
+ << "Could not set SATURATION - no ccm algorithm";
+ break;
+ }
+
+ ccm->setSaturation(ctrl.second.get<float>());
+ libcameraMetadata_.set(controls::Saturation,
+ ctrl.second.get<float>());
+ break;
+ }
+
+ case controls::SHARPNESS: {
+ RPiController::SharpenAlgorithm *sharpen = dynamic_cast<RPiController::SharpenAlgorithm *>(
+ controller_.getAlgorithm("sharpen"));
+ if (!sharpen) {
+ LOG(IPARPI, Warning)
+ << "Could not set SHARPNESS - no sharpen algorithm";
+ break;
+ }
+
+ sharpen->setStrength(ctrl.second.get<float>());
+ libcameraMetadata_.set(controls::Sharpness,
+ ctrl.second.get<float>());
+ break;
+ }
+
+ case controls::SCALER_CROP: {
+ /* We do nothing with this, but should avoid the warning below. */
+ break;
+ }
+
+ case controls::FRAME_DURATION_LIMITS: {
+ auto frameDurations = ctrl.second.get<Span<const int64_t>>();
+ applyFrameDurations(frameDurations[0] * 1.0us, frameDurations[1] * 1.0us);
+ break;
+ }
+
+ case controls::draft::NOISE_REDUCTION_MODE:
+ /* Handled below in handleControls() */
+ libcameraMetadata_.set(controls::draft::NoiseReductionMode,
+ ctrl.second.get<int32_t>());
+ break;
+
+ case controls::AF_MODE:
+ break; /* We already handled this one above */
+
+ case controls::AF_RANGE: {
+ AfAlgorithm *af = dynamic_cast<AfAlgorithm *>(controller_.getAlgorithm("af"));
+ if (!af) {
+ LOG(IPARPI, Warning)
+ << "Could not set AF_RANGE - no focus algorithm";
+ break;
+ }
+
+ auto range = AfRangeTable.find(ctrl.second.get<int32_t>());
+ if (range == AfRangeTable.end()) {
+ LOG(IPARPI, Error) << "AF range " << ctrl.second.get<int32_t>()
+ << " not recognised";
+ break;
+ }
+ af->setRange(range->second);
+ break;
+ }
+
+ case controls::AF_SPEED: {
+ AfAlgorithm *af = dynamic_cast<AfAlgorithm *>(controller_.getAlgorithm("af"));
+ if (!af) {
+ LOG(IPARPI, Warning)
+ << "Could not set AF_SPEED - no focus algorithm";
+ break;
+ }
+
+ AfAlgorithm::AfSpeed speed = ctrl.second.get<int32_t>() == controls::AfSpeedFast ?
+ AfAlgorithm::AfSpeedFast : AfAlgorithm::AfSpeedNormal;
+ af->setSpeed(speed);
+ break;
+ }
+
+ case controls::AF_METERING: {
+ AfAlgorithm *af = dynamic_cast<AfAlgorithm *>(controller_.getAlgorithm("af"));
+ if (!af) {
+ LOG(IPARPI, Warning)
+ << "Could not set AF_METERING - no AF algorithm";
+ break;
+ }
+ af->setMetering(ctrl.second.get<int32_t>() == controls::AfMeteringWindows);
+ break;
+ }
+
+ case controls::AF_WINDOWS: {
+ AfAlgorithm *af = dynamic_cast<AfAlgorithm *>(controller_.getAlgorithm("af"));
+ if (!af) {
+ LOG(IPARPI, Warning)
+ << "Could not set AF_WINDOWS - no AF algorithm";
+ break;
+ }
+ af->setWindows(ctrl.second.get<Span<const Rectangle>>());
+ break;
+ }
+
+ case controls::AF_PAUSE: {
+ AfAlgorithm *af = dynamic_cast<AfAlgorithm *>(controller_.getAlgorithm("af"));
+ if (!af || af->getMode() != AfAlgorithm::AfModeContinuous) {
+ LOG(IPARPI, Warning)
+ << "Could not set AF_PAUSE - no AF algorithm or not Continuous";
+ break;
+ }
+ auto pause = AfPauseTable.find(ctrl.second.get<int32_t>());
+ if (pause == AfPauseTable.end()) {
+ LOG(IPARPI, Error) << "AF pause " << ctrl.second.get<int32_t>()
+ << " not recognised";
+ break;
+ }
+ af->pause(pause->second);
+ break;
+ }
+
+ case controls::AF_TRIGGER: {
+ AfAlgorithm *af = dynamic_cast<AfAlgorithm *>(controller_.getAlgorithm("af"));
+ if (!af || af->getMode() != AfAlgorithm::AfModeAuto) {
+ LOG(IPARPI, Warning)
+ << "Could not set AF_TRIGGER - no AF algorithm or not Auto";
+ break;
+ } else {
+ if (ctrl.second.get<int32_t>() == controls::AfTriggerStart)
+ af->triggerScan();
+ else
+ af->cancelScan();
+ }
+ break;
+ }
+
+ case controls::LENS_POSITION: {
+ AfAlgorithm *af = dynamic_cast<AfAlgorithm *>(controller_.getAlgorithm("af"));
+ if (af) {
+ int32_t hwpos;
+ if (af->setLensPosition(ctrl.second.get<float>(), &hwpos)) {
+ ControlList lensCtrls(lensCtrls_);
+ lensCtrls.set(V4L2_CID_FOCUS_ABSOLUTE, hwpos);
+ setLensControls.emit(lensCtrls);
+ }
+ } else {
+ LOG(IPARPI, Warning)
+ << "Could not set LENS_POSITION - no AF algorithm";
+ }
+ break;
+ }
+
+ case controls::HDR_MODE: {
+ HdrAlgorithm *hdr = dynamic_cast<HdrAlgorithm *>(controller_.getAlgorithm("hdr"));
+ if (!hdr) {
+ LOG(IPARPI, Warning) << "No HDR algorithm available";
+ break;
+ }
+
+ auto mode = HdrModeTable.find(ctrl.second.get<int32_t>());
+ if (mode == HdrModeTable.end()) {
+ LOG(IPARPI, Warning) << "Unrecognised HDR mode";
+ break;
+ }
+
+ AgcAlgorithm *agc = dynamic_cast<AgcAlgorithm *>(controller_.getAlgorithm("agc"));
+ if (!agc) {
+ LOG(IPARPI, Warning) << "HDR requires an AGC algorithm";
+ break;
+ }
+
+ if (hdr->setMode(mode->second) == 0) {
+ agc->setActiveChannels(hdr->getChannels());
+
+ /* We also disable adaptive contrast enhancement if HDR is running. */
+ ContrastAlgorithm *contrast =
+ dynamic_cast<ContrastAlgorithm *>(controller_.getAlgorithm("contrast"));
+ if (contrast) {
+ if (mode->second == "Off")
+ contrast->restoreCe();
+ else
+ contrast->enableCe(false);
+ }
+
+ DenoiseAlgorithm *denoise =
+ dynamic_cast<DenoiseAlgorithm *>(controller_.getAlgorithm("denoise"));
+ if (denoise) {
+ /* \todo - make the HDR mode say what denoise it wants? */
+ if (mode->second == "Night")
+ denoise->setConfig("night");
+ else if (mode->second == "SingleExposure")
+ denoise->setConfig("hdr");
+ /* MultiExposure doesn't need extra denoise. */
+ else
+ denoise->setConfig("normal");
+ }
+ } else
+ LOG(IPARPI, Warning)
+ << "HDR mode " << mode->second << " not supported";
+
+ break;
+ }
+
+ case controls::rpi::STATS_OUTPUT_ENABLE:
+ statsMetadataOutput_ = ctrl.second.get<bool>();
+ break;
+
+ default:
+ LOG(IPARPI, Warning)
+ << "Ctrl " << controls::controls.at(ctrl.first)->name()
+ << " is not handled.";
+ break;
+ }
+ }
+
+ /* Give derived classes a chance to examine the new controls. */
+ handleControls(controls);
+}
+
+void IpaBase::fillDeviceStatus(const ControlList &sensorControls, unsigned int ipaContext)
+{
+ DeviceStatus deviceStatus = {};
+
+ int32_t exposureLines = sensorControls.get(V4L2_CID_EXPOSURE).get<int32_t>();
+ int32_t gainCode = sensorControls.get(V4L2_CID_ANALOGUE_GAIN).get<int32_t>();
+ int32_t vblank = sensorControls.get(V4L2_CID_VBLANK).get<int32_t>();
+ int32_t hblank = sensorControls.get(V4L2_CID_HBLANK).get<int32_t>();
+
+ deviceStatus.lineLength = helper_->hblankToLineLength(hblank);
+ deviceStatus.shutterSpeed = helper_->exposure(exposureLines, deviceStatus.lineLength);
+ deviceStatus.analogueGain = helper_->gain(gainCode);
+ deviceStatus.frameLength = mode_.height + vblank;
+
+ RPiController::AfAlgorithm *af = dynamic_cast<RPiController::AfAlgorithm *>(
+ controller_.getAlgorithm("af"));
+ if (af)
+ deviceStatus.lensPosition = af->getLensPosition();
+
+ LOG(IPARPI, Debug) << "Metadata - " << deviceStatus;
+
+ rpiMetadata_[ipaContext].set("device.status", deviceStatus);
+}
+
+void IpaBase::reportMetadata(unsigned int ipaContext)
+{
+ RPiController::Metadata &rpiMetadata = rpiMetadata_[ipaContext];
+ std::unique_lock<RPiController::Metadata> lock(rpiMetadata);
+
+ /*
+ * Certain information about the current frame and how it will be
+ * processed can be extracted and placed into the libcamera metadata
+ * buffer, where an application could query it.
+ */
+ DeviceStatus *deviceStatus = rpiMetadata.getLocked<DeviceStatus>("device.status");
+ if (deviceStatus) {
+ libcameraMetadata_.set(controls::ExposureTime,
+ deviceStatus->shutterSpeed.get<std::micro>());
+ libcameraMetadata_.set(controls::AnalogueGain, deviceStatus->analogueGain);
+ libcameraMetadata_.set(controls::FrameDuration,
+ helper_->exposure(deviceStatus->frameLength, deviceStatus->lineLength).get<std::micro>());
+ if (deviceStatus->sensorTemperature)
+ libcameraMetadata_.set(controls::SensorTemperature, *deviceStatus->sensorTemperature);
+ if (deviceStatus->lensPosition)
+ libcameraMetadata_.set(controls::LensPosition, *deviceStatus->lensPosition);
+ }
+
+ AgcPrepareStatus *agcPrepareStatus = rpiMetadata.getLocked<AgcPrepareStatus>("agc.prepare_status");
+ if (agcPrepareStatus) {
+ libcameraMetadata_.set(controls::AeLocked, agcPrepareStatus->locked);
+ libcameraMetadata_.set(controls::DigitalGain, agcPrepareStatus->digitalGain);
+ }
+
+ LuxStatus *luxStatus = rpiMetadata.getLocked<LuxStatus>("lux.status");
+ if (luxStatus)
+ libcameraMetadata_.set(controls::Lux, luxStatus->lux);
+
+ AwbStatus *awbStatus = rpiMetadata.getLocked<AwbStatus>("awb.status");
+ if (awbStatus) {
+ libcameraMetadata_.set(controls::ColourGains, { static_cast<float>(awbStatus->gainR),
+ static_cast<float>(awbStatus->gainB) });
+ libcameraMetadata_.set(controls::ColourTemperature, awbStatus->temperatureK);
+ }
+
+ BlackLevelStatus *blackLevelStatus = rpiMetadata.getLocked<BlackLevelStatus>("black_level.status");
+ if (blackLevelStatus)
+ libcameraMetadata_.set(controls::SensorBlackLevels,
+ { static_cast<int32_t>(blackLevelStatus->blackLevelR),
+ static_cast<int32_t>(blackLevelStatus->blackLevelG),
+ static_cast<int32_t>(blackLevelStatus->blackLevelG),
+ static_cast<int32_t>(blackLevelStatus->blackLevelB) });
+
+ RPiController::FocusRegions *focusStatus =
+ rpiMetadata.getLocked<RPiController::FocusRegions>("focus.status");
+ if (focusStatus) {
+ /*
+ * Calculate the average FoM over the central (symmetric) positions
+ * to give an overall scene FoM. This can change later if it is
+ * not deemed suitable.
+ */
+ libcamera::Size size = focusStatus->size();
+ unsigned rows = size.height;
+ unsigned cols = size.width;
+
+ uint64_t sum = 0;
+ unsigned int numRegions = 0;
+ for (unsigned r = rows / 3; r < rows - rows / 3; ++r) {
+ for (unsigned c = cols / 4; c < cols - cols / 4; ++c) {
+ sum += focusStatus->get({ (int)c, (int)r }).val;
+ numRegions++;
+ }
+ }
+
+ uint32_t focusFoM = sum / numRegions;
+ libcameraMetadata_.set(controls::FocusFoM, focusFoM);
+ }
+
+ CcmStatus *ccmStatus = rpiMetadata.getLocked<CcmStatus>("ccm.status");
+ if (ccmStatus) {
+ float m[9];
+ for (unsigned int i = 0; i < 9; i++)
+ m[i] = ccmStatus->matrix[i];
+ libcameraMetadata_.set(controls::ColourCorrectionMatrix, m);
+ }
+
+ const AfStatus *afStatus = rpiMetadata.getLocked<AfStatus>("af.status");
+ if (afStatus) {
+ int32_t s, p;
+ switch (afStatus->state) {
+ case AfState::Scanning:
+ s = controls::AfStateScanning;
+ break;
+ case AfState::Focused:
+ s = controls::AfStateFocused;
+ break;
+ case AfState::Failed:
+ s = controls::AfStateFailed;
+ break;
+ default:
+ s = controls::AfStateIdle;
+ }
+ switch (afStatus->pauseState) {
+ case AfPauseState::Pausing:
+ p = controls::AfPauseStatePausing;
+ break;
+ case AfPauseState::Paused:
+ p = controls::AfPauseStatePaused;
+ break;
+ default:
+ p = controls::AfPauseStateRunning;
+ }
+ libcameraMetadata_.set(controls::AfState, s);
+ libcameraMetadata_.set(controls::AfPauseState, p);
+ }
+
+ /*
+ * The HDR algorithm sets the HDR channel into the agc.status at the time that those
+ * AGC parameters were calculated several frames ago, so it comes back to us now in
+ * the delayed_status. If this frame is too soon after a mode switch for the
+ * delayed_status to be available, we use the HDR status that came out of the
+ * switchMode call.
+ */
+ const AgcStatus *agcStatus = rpiMetadata.getLocked<AgcStatus>("agc.delayed_status");
+ const HdrStatus &hdrStatus = agcStatus ? agcStatus->hdr : hdrStatus_;
+ if (!hdrStatus.mode.empty() && hdrStatus.mode != "Off") {
+ int32_t hdrMode = controls::HdrModeOff;
+ for (auto const &[mode, name] : HdrModeTable) {
+ if (hdrStatus.mode == name) {
+ hdrMode = mode;
+ break;
+ }
+ }
+ libcameraMetadata_.set(controls::HdrMode, hdrMode);
+
+ if (hdrStatus.channel == "short")
+ libcameraMetadata_.set(controls::HdrChannel, controls::HdrChannelShort);
+ else if (hdrStatus.channel == "long")
+ libcameraMetadata_.set(controls::HdrChannel, controls::HdrChannelLong);
+ else if (hdrStatus.channel == "medium")
+ libcameraMetadata_.set(controls::HdrChannel, controls::HdrChannelMedium);
+ else
+ libcameraMetadata_.set(controls::HdrChannel, controls::HdrChannelNone);
+ }
+
+ metadataReady.emit(libcameraMetadata_);
+}
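
For scale, the central-region average above applied to a hypothetical 16x12 grid of focus regions takes rows 4-7 (rows/3 to rows - rows/3 - 1) and columns 4-11 (cols/4 to cols - cols/4 - 1), so 32 of the 192 regions feed the reported FocusFoM.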
+
+void IpaBase::applyFrameDurations(Duration minFrameDuration, Duration maxFrameDuration)
+{
+ /*
+ * This will only be applied once AGC recalculations occur.
+ * The values may be clamped based on the sensor mode capabilities as well.
+ */
+ minFrameDuration_ = minFrameDuration ? minFrameDuration : defaultMinFrameDuration;
+ maxFrameDuration_ = maxFrameDuration ? maxFrameDuration : defaultMaxFrameDuration;
+ minFrameDuration_ = std::clamp(minFrameDuration_,
+ mode_.minFrameDuration, mode_.maxFrameDuration);
+ maxFrameDuration_ = std::clamp(maxFrameDuration_,
+ mode_.minFrameDuration, mode_.maxFrameDuration);
+ maxFrameDuration_ = std::max(maxFrameDuration_, minFrameDuration_);
+
+ /* Return the validated limits via metadata. */
+ libcameraMetadata_.set(controls::FrameDurationLimits,
+ { static_cast<int64_t>(minFrameDuration_.get<std::micro>()),
+ static_cast<int64_t>(maxFrameDuration_.get<std::micro>()) });
+
+ /*
+ * Calculate the maximum exposure time possible for the AGC to use.
+ * getBlanking() will update maxShutter with the largest exposure
+ * value possible.
+ */
+ Duration maxShutter = Duration::max();
+ helper_->getBlanking(maxShutter, minFrameDuration_, maxFrameDuration_);
+
+ RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>(
+ controller_.getAlgorithm("agc"));
+ agc->setMaxShutter(maxShutter);
+}
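
For instance, a request of FrameDurationLimits = { 0, 0 } falls back to the 1/30 s and 250 s defaults defined at the top of this file, is then clamped to the sensor mode's own frame-duration range, and the resulting validated limits are what get reported back in the metadata and used to bound the AGC's maximum shutter.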
+
+void IpaBase::applyAGC(const struct AgcStatus *agcStatus, ControlList &ctrls)
+{
+ const int32_t minGainCode = helper_->gainCode(mode_.minAnalogueGain);
+ const int32_t maxGainCode = helper_->gainCode(mode_.maxAnalogueGain);
+ int32_t gainCode = helper_->gainCode(agcStatus->analogueGain);
+
+ /*
+ * Ensure anything larger than the max gain code will not be passed to
+ * DelayedControls. The AGC will correctly handle a lower gain returned
+ * by the sensor, provided it knows the actual gain used.
+ */
+ gainCode = std::clamp<int32_t>(gainCode, minGainCode, maxGainCode);
+
+ /* getBlanking might clip exposure time to the fps limits. */
+ Duration exposure = agcStatus->shutterTime;
+ auto [vblank, hblank] = helper_->getBlanking(exposure, minFrameDuration_, maxFrameDuration_);
+ int32_t exposureLines = helper_->exposureLines(exposure,
+ helper_->hblankToLineLength(hblank));
+
+ LOG(IPARPI, Debug) << "Applying AGC Exposure: " << exposure
+ << " (Shutter lines: " << exposureLines << ", AGC requested "
+ << agcStatus->shutterTime << ") Gain: "
+ << agcStatus->analogueGain << " (Gain Code: "
+ << gainCode << ")";
+
+ ctrls.set(V4L2_CID_VBLANK, static_cast<int32_t>(vblank));
+ ctrls.set(V4L2_CID_EXPOSURE, exposureLines);
+ ctrls.set(V4L2_CID_ANALOGUE_GAIN, gainCode);
+
+ /*
+ * At present, there is no way of knowing if a control is read-only.
+ * As a workaround, assume that if the minimum and maximum values of
+ * the V4L2_CID_HBLANK control are the same, it implies the control
+ * is read-only. This seems to be the case for all the cameras our IPA
+ * works with.
+ *
+ * \todo The control API ought to have a flag to specify if a control
+ * is read-only which could be used below.
+ */
+ if (mode_.minLineLength != mode_.maxLineLength)
+ ctrls.set(V4L2_CID_HBLANK, static_cast<int32_t>(hblank));
+
+ /*
+ * Store the frame length times in a circular queue, up to FrameLengthsQueueSize
+ * elements. This will be used to advertise a camera timeout value to the
+ * pipeline handler.
+ */
+ frameLengths_.pop_front();
+ frameLengths_.push_back(helper_->exposure(vblank + mode_.height,
+ helper_->hblankToLineLength(hblank)));
+}
+
+} /* namespace ipa::RPi */
+
+} /* namespace libcamera */
diff --git a/src/ipa/rpi/common/ipa_base.h b/src/ipa/rpi/common/ipa_base.h
new file mode 100644
index 00000000..a95cda1f
--- /dev/null
+++ b/src/ipa/rpi/common/ipa_base.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2023, Raspberry Pi Ltd
+ *
+ * Raspberry Pi IPA base class
+ */
+#pragma once
+
+#include <array>
+#include <deque>
+#include <map>
+#include <stdint.h>
+
+#include <libcamera/base/utils.h>
+#include <libcamera/controls.h>
+
+#include <libcamera/ipa/raspberrypi_ipa_interface.h>
+
+#include "libcamera/internal/mapped_framebuffer.h"
+
+#include "cam_helper/cam_helper.h"
+#include "controller/agc_status.h"
+#include "controller/camera_mode.h"
+#include "controller/controller.h"
+#include "controller/hdr_status.h"
+#include "controller/metadata.h"
+
+namespace libcamera {
+
+namespace ipa::RPi {
+
+class IpaBase : public IPARPiInterface
+{
+public:
+ IpaBase();
+ ~IpaBase();
+
+ int32_t init(const IPASettings &settings, const InitParams &params, InitResult *result) override;
+ int32_t configure(const IPACameraSensorInfo &sensorInfo, const ConfigParams &params,
+ ConfigResult *result) override;
+
+ void start(const ControlList &controls, StartResult *result) override;
+ void stop() override {}
+
+ void mapBuffers(const std::vector<IPABuffer> &buffers) override;
+ void unmapBuffers(const std::vector<unsigned int> &ids) override;
+
+ void prepareIsp(const PrepareParams &params) override;
+ void processStats(const ProcessParams &params) override;
+
+protected:
+ bool monoSensor()
+ {
+ return monoSensor_;
+ }
+
+ /* Raspberry Pi controller specific defines. */
+ std::unique_ptr<RPiController::CamHelper> helper_;
+ RPiController::Controller controller_;
+
+ ControlInfoMap sensorCtrls_;
+ ControlInfoMap lensCtrls_;
+
+ /* Camera sensor params. */
+ CameraMode mode_;
+
+ /* Track the frame length times over FrameLengthsQueueSize frames. */
+ std::deque<utils::Duration> frameLengths_;
+ utils::Duration lastTimeout_;
+ ControlList libcameraMetadata_;
+ bool statsMetadataOutput_;
+
+ /* Remember the HDR status after a mode switch. */
+ HdrStatus hdrStatus_;
+
+ /* Whether the stitch block (if available) needs to swap buffers. */
+ bool stitchSwapBuffers_;
+
+private:
+ /* Number of metadata objects available in the context list. */
+ static constexpr unsigned int numMetadataContexts = 16;
+
+ virtual int32_t platformInit(const InitParams &params, InitResult *result) = 0;
+ virtual int32_t platformStart(const ControlList &controls, StartResult *result) = 0;
+ virtual int32_t platformConfigure(const ConfigParams &params, ConfigResult *result) = 0;
+
+ virtual void platformPrepareIsp(const PrepareParams &params,
+ RPiController::Metadata &rpiMetadata) = 0;
+ virtual RPiController::StatisticsPtr platformProcessStats(Span<uint8_t> mem) = 0;
+
+ void setMode(const IPACameraSensorInfo &sensorInfo);
+ void setCameraTimeoutValue();
+ bool validateSensorControls();
+ bool validateLensControls();
+ void applyControls(const ControlList &controls);
+ virtual void handleControls(const ControlList &controls) = 0;
+ void fillDeviceStatus(const ControlList &sensorControls, unsigned int ipaContext);
+ void reportMetadata(unsigned int ipaContext);
+ void applyFrameDurations(utils::Duration minFrameDuration, utils::Duration maxFrameDuration);
+ void applyAGC(const struct AgcStatus *agcStatus, ControlList &ctrls);
+
+ std::map<unsigned int, MappedFrameBuffer> buffers_;
+
+ bool lensPresent_;
+ bool monoSensor_;
+
+ std::array<RPiController::Metadata, numMetadataContexts> rpiMetadata_;
+
+ /*
+ * We count frames to decide if the frame must be hidden (e.g. from
+ * display) or mistrusted (i.e. not given to the control algos).
+ */
+ uint64_t frameCount_;
+
+ /* How many frames we should avoid running control algos on. */
+ unsigned int mistrustCount_;
+
+ /* Number of frames that need to be dropped on startup. */
+ unsigned int dropFrameCount_;
+
+ /* Frame timestamp for the last run of the controller. */
+ uint64_t lastRunTimestamp_;
+
+ /* Do we run a Controller::process() for this frame? */
+ bool processPending_;
+
+ /* Distinguish the first camera start from others. */
+ bool firstStart_;
+
+ /* Frame duration (1/fps) limits. */
+ utils::Duration minFrameDuration_;
+ utils::Duration maxFrameDuration_;
+
+ /* The current state of flicker avoidance. */
+ struct FlickerState {
+ int32_t mode;
+ utils::Duration manualPeriod;
+ } flickerState_;
+};
+
+} /* namespace ipa::RPi */
+
+} /* namespace libcamera */
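
A platform backend derives from IpaBase and implements the private pure virtual hooks. The sketch below uses a made-up IpaExample class with stub bodies, purely to show which methods a backend must provide:

    class IpaExample final : public IpaBase
    {
    private:
    	int32_t platformInit([[maybe_unused]] const InitParams &params,
    			     [[maybe_unused]] InitResult *result) override
    	{
    		/* Platform-specific one-time setup; return 0 on success. */
    		return 0;
    	}
    	int32_t platformStart([[maybe_unused]] const ControlList &controls,
    			      [[maybe_unused]] StartResult *result) override
    	{
    		return 0;
    	}
    	int32_t platformConfigure([[maybe_unused]] const ConfigParams &params,
    				  [[maybe_unused]] ConfigResult *result) override
    	{
    		return 0;
    	}
    	void platformPrepareIsp([[maybe_unused]] const PrepareParams &params,
    				[[maybe_unused]] RPiController::Metadata &rpiMetadata) override
    	{
    		/* Translate the algorithm results into ISP parameters. */
    	}
    	RPiController::StatisticsPtr platformProcessStats([[maybe_unused]] Span<uint8_t> mem) override
    	{
    		/* Parse the hardware statistics buffer into a Statistics object. */
    		return {};
    	}
    	void handleControls([[maybe_unused]] const ControlList &controls) override
    	{
    	}
    };
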
diff --git a/src/ipa/rpi/common/meson.build b/src/ipa/rpi/common/meson.build
new file mode 100644
index 00000000..73d2ee73
--- /dev/null
+++ b/src/ipa/rpi/common/meson.build
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: CC0-1.0
+
+rpi_ipa_common_sources = files([
+ 'ipa_base.cpp',
+])
+
+rpi_ipa_common_includes = [
+ include_directories('..'),
+]
+
+rpi_ipa_common_deps = [
+ libcamera_private,
+]
+
+rpi_ipa_common_lib = static_library('rpi_ipa_common', rpi_ipa_common_sources,
+ include_directories : rpi_ipa_common_includes,
+ dependencies : rpi_ipa_common_deps)
diff --git a/src/ipa/rpi/controller/af_algorithm.h b/src/ipa/rpi/controller/af_algorithm.h
new file mode 100644
index 00000000..ad9b5754
--- /dev/null
+++ b/src/ipa/rpi/controller/af_algorithm.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2022, Raspberry Pi Ltd
+ *
+ * autofocus algorithm interface
+ */
+#pragma once
+
+#include <optional>
+
+#include <libcamera/base/span.h>
+
+#include "algorithm.h"
+
+namespace RPiController {
+
+class AfAlgorithm : public Algorithm
+{
+public:
+ AfAlgorithm(Controller *controller)
+ : Algorithm(controller) {}
+
+ /*
+ * An autofocus algorithm should provide the following calls.
+ *
+ * Where a ControlList combines a change of AfMode with other AF
+ * controls, setMode() should be called first, to ensure the
+ * algorithm will be in the correct state to handle controls.
+ *
+ * setLensPosition() returns true if the mode was AfModeManual and
+ * the lens position has changed, otherwise returns false. When it
+ * returns true, hwpos should be sent immediately to the lens driver.
+ *
+ * getMode() is provided mainly for validating controls.
+ * getLensPosition() is provided for populating DeviceStatus.
+ */
+
+ enum AfRange { AfRangeNormal = 0,
+ AfRangeMacro,
+ AfRangeFull,
+ AfRangeMax };
+
+ enum AfSpeed { AfSpeedNormal = 0,
+ AfSpeedFast,
+ AfSpeedMax };
+
+ enum AfMode { AfModeManual = 0,
+ AfModeAuto,
+ AfModeContinuous };
+
+ enum AfPause { AfPauseImmediate = 0,
+ AfPauseDeferred,
+ AfPauseResume };
+
+ virtual void setRange([[maybe_unused]] AfRange range)
+ {
+ }
+ virtual void setSpeed([[maybe_unused]] AfSpeed speed)
+ {
+ }
+ virtual void setMetering([[maybe_unused]] bool use_windows)
+ {
+ }
+ virtual void setWindows([[maybe_unused]] libcamera::Span<libcamera::Rectangle const> const &wins)
+ {
+ }
+ virtual void setMode(AfMode mode) = 0;
+ virtual AfMode getMode() const = 0;
+ virtual bool setLensPosition(double dioptres, int32_t *hwpos) = 0;
+ virtual std::optional<double> getLensPosition() const = 0;
+ virtual void triggerScan() = 0;
+ virtual void cancelScan() = 0;
+ virtual void pause(AfPause pause) = 0;
+};
+
+} // namespace RPiController
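
A sketch of how the IPA layer might drive this interface, following the ordering rule above (setMode() before any other AF control). The lensCtrls control list and the V4L2 focus control used here are illustrative:

    RPiController::AfAlgorithm *af = dynamic_cast<RPiController::AfAlgorithm *>(
    	controller_.getAlgorithm("af"));
    if (af) {
    	/* Switch mode first so subsequent controls land in the right state. */
    	af->setMode(RPiController::AfAlgorithm::AfModeManual);

    	int32_t hwpos;
    	if (af->setLensPosition(2.0 /* dioptres */, &hwpos)) {
    		/* Manual mode and the position changed: move the lens now. */
    		lensCtrls.set(V4L2_CID_FOCUS_ABSOLUTE, hwpos);
    	}
    }
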
diff --git a/src/ipa/rpi/controller/af_status.h b/src/ipa/rpi/controller/af_status.h
new file mode 100644
index 00000000..c1487cc4
--- /dev/null
+++ b/src/ipa/rpi/controller/af_status.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2022, Raspberry Pi Ltd
+ *
+ * AF control algorithm status
+ */
+#pragma once
+
+#include <optional>
+
+/*
+ * The AF algorithm should post the following structure into the image's
+ * "af.status" metadata. lensSetting should control the lens.
+ */
+
+enum class AfState {
+ Idle = 0,
+ Scanning,
+ Focused,
+ Failed
+};
+
+enum class AfPauseState {
+ Running = 0,
+ Pausing,
+ Paused
+};
+
+struct AfStatus {
+ /* state for reporting */
+ AfState state;
+ AfPauseState pauseState;
+ /* lensSetting should be sent to the lens driver, when valid */
+ std::optional<int> lensSetting;
+};
diff --git a/src/ipa/rpi/controller/agc_algorithm.h b/src/ipa/rpi/controller/agc_algorithm.h
new file mode 100644
index 00000000..1132de7e
--- /dev/null
+++ b/src/ipa/rpi/controller/agc_algorithm.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * AGC/AEC control algorithm interface
+ */
+#pragma once
+
+#include <vector>
+
+#include <libcamera/base/utils.h>
+
+#include "algorithm.h"
+
+namespace RPiController {
+
+class AgcAlgorithm : public Algorithm
+{
+public:
+ AgcAlgorithm(Controller *controller) : Algorithm(controller) {}
+ /* An AGC algorithm must provide the following: */
+ virtual unsigned int getConvergenceFrames() const = 0;
+ virtual std::vector<double> const &getWeights() const = 0;
+ virtual void setEv(unsigned int channel, double ev) = 0;
+ virtual void setFlickerPeriod(libcamera::utils::Duration flickerPeriod) = 0;
+ virtual void setFixedShutter(unsigned int channel,
+ libcamera::utils::Duration fixedShutter) = 0;
+ virtual void setMaxShutter(libcamera::utils::Duration maxShutter) = 0;
+ virtual void setFixedAnalogueGain(unsigned int channel, double fixedAnalogueGain) = 0;
+ virtual void setMeteringMode(std::string const &meteringModeName) = 0;
+ virtual void setExposureMode(std::string const &exposureModeName) = 0;
+	virtual void setConstraintMode(std::string const &constraintModeName) = 0;
+ virtual void enableAuto() = 0;
+ virtual void disableAuto() = 0;
+ virtual void setActiveChannels(const std::vector<unsigned int> &activeChannels) = 0;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/agc_status.h b/src/ipa/rpi/controller/agc_status.h
new file mode 100644
index 00000000..c7c87b83
--- /dev/null
+++ b/src/ipa/rpi/controller/agc_status.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * AGC/AEC control algorithm status
+ */
+#pragma once
+
+#include <string>
+
+#include <libcamera/base/utils.h>
+
+#include "hdr_status.h"
+
+/*
+ * The AGC algorithm process method should post an AgcStatus into the image
+ * metadata under the tag "agc.status".
+ * The AGC algorithm prepare method should post an AgcPrepareStatus instead
+ * under "agc.prepare_status".
+ */
+
+/*
+ * Note: totalExposureValue will be reported as zero until the algorithm has
+ * seen statistics and calculated meaningful values. The contents should be
+ * ignored until then.
+ */
+
+struct AgcStatus {
+ libcamera::utils::Duration totalExposureValue; /* value for all exposure and gain for this image */
+ libcamera::utils::Duration targetExposureValue; /* (unfiltered) target total exposure AGC is aiming for */
+ libcamera::utils::Duration shutterTime;
+ double analogueGain;
+ std::string exposureMode;
+ std::string constraintMode;
+ std::string meteringMode;
+ double ev;
+ libcamera::utils::Duration flickerPeriod;
+ int floatingRegionEnable;
+ libcamera::utils::Duration fixedShutter;
+ double fixedAnalogueGain;
+ unsigned int channel;
+ HdrStatus hdr;
+};
+
+struct AgcPrepareStatus {
+ double digitalGain;
+ int locked;
+};
diff --git a/src/ipa/rpi/controller/algorithm.cpp b/src/ipa/rpi/controller/algorithm.cpp
new file mode 100644
index 00000000..beed47a1
--- /dev/null
+++ b/src/ipa/rpi/controller/algorithm.cpp
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * ISP control algorithms
+ */
+
+#include "algorithm.h"
+
+using namespace RPiController;
+
+int Algorithm::read([[maybe_unused]] const libcamera::YamlObject &params)
+{
+ return 0;
+}
+
+void Algorithm::initialise()
+{
+}
+
+void Algorithm::switchMode([[maybe_unused]] CameraMode const &cameraMode,
+ [[maybe_unused]] Metadata *metadata)
+{
+}
+
+void Algorithm::prepare([[maybe_unused]] Metadata *imageMetadata)
+{
+}
+
+void Algorithm::process([[maybe_unused]] StatisticsPtr &stats,
+ [[maybe_unused]] Metadata *imageMetadata)
+{
+}
+
+/* For registering algorithms with the system: */
+
+namespace {
+
+std::map<std::string, AlgoCreateFunc> &algorithms()
+{
+ static std::map<std::string, AlgoCreateFunc> algorithms;
+ return algorithms;
+}
+
+} /* namespace */
+
+std::map<std::string, AlgoCreateFunc> const &RPiController::getAlgorithms()
+{
+ return algorithms();
+}
+
+RegisterAlgorithm::RegisterAlgorithm(char const *name,
+ AlgoCreateFunc createFunc)
+{
+ algorithms()[std::string(name)] = createFunc;
+}
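
A concrete algorithm registers itself by pairing a factory function with a file-scope RegisterAlgorithm instance; the constructor above records the factory in the map returned by getAlgorithms(). ExampleAlgo and its "rpi.example" name are illustrative:

    namespace {

    RPiController::Algorithm *create(RPiController::Controller *controller)
    {
    	return new ExampleAlgo(controller); /* ExampleAlgo derives from Algorithm */
    }

    RPiController::RegisterAlgorithm reg("rpi.example", &create);

    } /* namespace */
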
diff --git a/src/ipa/rpi/controller/algorithm.h b/src/ipa/rpi/controller/algorithm.h
new file mode 100644
index 00000000..1971bfdc
--- /dev/null
+++ b/src/ipa/rpi/controller/algorithm.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * ISP control algorithm interface
+ */
+#pragma once
+
+/*
+ * All algorithms should be derived from this class and made available to the
+ * Controller.
+ */
+
+#include <string>
+#include <memory>
+#include <map>
+
+#include "libcamera/internal/yaml_parser.h"
+
+#include "controller.h"
+
+namespace RPiController {
+
+/* This defines the basic interface for all control algorithms. */
+
+class Algorithm
+{
+public:
+ Algorithm(Controller *controller)
+ : controller_(controller)
+ {
+ }
+ virtual ~Algorithm() = default;
+ virtual char const *name() const = 0;
+ virtual int read(const libcamera::YamlObject &params);
+ virtual void initialise();
+ virtual void switchMode(CameraMode const &cameraMode, Metadata *metadata);
+ virtual void prepare(Metadata *imageMetadata);
+ virtual void process(StatisticsPtr &stats, Metadata *imageMetadata);
+ Metadata &getGlobalMetadata() const
+ {
+ return controller_->getGlobalMetadata();
+ }
+ const std::string &getTarget() const
+ {
+ return controller_->getTarget();
+ }
+ const Controller::HardwareConfig &getHardwareConfig() const
+ {
+ return controller_->getHardwareConfig();
+ }
+
+private:
+ Controller *controller_;
+};
+
+/*
+ * This code is for automatic registration of Front End algorithms with the
+ * system.
+ */
+
+typedef Algorithm *(*AlgoCreateFunc)(Controller *controller);
+struct RegisterAlgorithm {
+ RegisterAlgorithm(char const *name, AlgoCreateFunc createFunc);
+};
+std::map<std::string, AlgoCreateFunc> const &getAlgorithms();
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/alsc_status.h b/src/ipa/rpi/controller/alsc_status.h
new file mode 100644
index 00000000..329e8a37
--- /dev/null
+++ b/src/ipa/rpi/controller/alsc_status.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * ALSC (auto lens shading correction) control algorithm status
+ */
+#pragma once
+
+#include <vector>
+
+/*
+ * The ALSC algorithm should post the following structure into the image's
+ * "alsc.status" metadata.
+ */
+
+struct AlscStatus {
+ std::vector<double> r;
+ std::vector<double> g;
+ std::vector<double> b;
+ unsigned int rows;
+ unsigned int cols;
+};
diff --git a/src/ipa/rpi/controller/awb_algorithm.h b/src/ipa/rpi/controller/awb_algorithm.h
new file mode 100644
index 00000000..1779b050
--- /dev/null
+++ b/src/ipa/rpi/controller/awb_algorithm.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * AWB control algorithm interface
+ */
+#pragma once
+
+#include "algorithm.h"
+
+namespace RPiController {
+
+class AwbAlgorithm : public Algorithm
+{
+public:
+ AwbAlgorithm(Controller *controller) : Algorithm(controller) {}
+ /* An AWB algorithm must provide the following: */
+ virtual unsigned int getConvergenceFrames() const = 0;
+ virtual void initialValues(double &gainR, double &gainB) = 0;
+ virtual void setMode(std::string const &modeName) = 0;
+ virtual void setManualGains(double manualR, double manualB) = 0;
+ virtual void enableAuto() = 0;
+ virtual void disableAuto() = 0;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/awb_status.h b/src/ipa/rpi/controller/awb_status.h
new file mode 100644
index 00000000..125df1a0
--- /dev/null
+++ b/src/ipa/rpi/controller/awb_status.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * AWB control algorithm status
+ */
+#pragma once
+
+/*
+ * The AWB algorithm places its results into both the image and global metadata,
+ * under the tag "awb.status".
+ */
+
+struct AwbStatus {
+ char mode[32];
+ double temperatureK;
+ double gainR;
+ double gainG;
+ double gainB;
+};
diff --git a/src/ipa/rpi/controller/black_level_algorithm.h b/src/ipa/rpi/controller/black_level_algorithm.h
new file mode 100644
index 00000000..ce044e59
--- /dev/null
+++ b/src/ipa/rpi/controller/black_level_algorithm.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2023, Raspberry Pi Ltd
+ *
+ * black level control algorithm interface
+ */
+#pragma once
+
+#include "algorithm.h"
+
+namespace RPiController {
+
+class BlackLevelAlgorithm : public Algorithm
+{
+public:
+ BlackLevelAlgorithm(Controller *controller)
+ : Algorithm(controller) {}
+ /* A black level algorithm must provide the following: */
+ virtual void initialValues(uint16_t &blackLevelR, uint16_t &blackLevelG,
+ uint16_t &blackLevelB) = 0;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/black_level_status.h b/src/ipa/rpi/controller/black_level_status.h
new file mode 100644
index 00000000..57a0705a
--- /dev/null
+++ b/src/ipa/rpi/controller/black_level_status.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * black level control algorithm status
+ */
+#pragma once
+
+/* The "black level" algorithm stores the black levels to use. */
+
+struct BlackLevelStatus {
+ uint16_t blackLevelR; /* out of 16 bits */
+ uint16_t blackLevelG;
+ uint16_t blackLevelB;
+};
diff --git a/src/ipa/rpi/controller/cac_status.h b/src/ipa/rpi/controller/cac_status.h
new file mode 100644
index 00000000..475d4c5c
--- /dev/null
+++ b/src/ipa/rpi/controller/cac_status.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2023 Raspberry Pi Ltd
+ *
+ * CAC (Chromatic Aberration Correction) algorithm status
+ */
+#pragma once
+
+#include "pwl.h"
+
+struct CacStatus {
+ std::vector<double> lutRx;
+ std::vector<double> lutRy;
+ std::vector<double> lutBx;
+ std::vector<double> lutBy;
+};
diff --git a/src/ipa/rpi/controller/camera_mode.h b/src/ipa/rpi/controller/camera_mode.h
new file mode 100644
index 00000000..4fdb5b85
--- /dev/null
+++ b/src/ipa/rpi/controller/camera_mode.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019-2020, Raspberry Pi Ltd
+ *
+ * description of a particular operating mode of a sensor
+ */
+#pragma once
+
+#include <libcamera/transform.h>
+
+#include <libcamera/base/utils.h>
+
+/*
+ * Description of a "camera mode", holding enough information for control
+ * algorithms to adapt their behaviour to the different modes of the camera,
+ * including binning, scaling, cropping etc.
+ */
+
+struct CameraMode {
+ /* bit depth of the raw camera output */
+ uint32_t bitdepth;
+ /* size in pixels of frames in this mode */
+ uint16_t width;
+ uint16_t height;
+ /* size of full resolution uncropped frame ("sensor frame") */
+ uint16_t sensorWidth;
+ uint16_t sensorHeight;
+ /* binning factor (1 = no binning, 2 = 2-pixel binning etc.) */
+ uint8_t binX;
+ uint8_t binY;
+ /* location of top left pixel in the sensor frame */
+ uint16_t cropX;
+ uint16_t cropY;
+ /* scaling factor (so if uncropped, width*scaleX is sensorWidth) */
+ double scaleX;
+ double scaleY;
+ /* scaling of the noise compared to the native sensor mode */
+ double noiseFactor;
+ /* minimum and maximum line time and frame durations */
+ libcamera::utils::Duration minLineLength;
+ libcamera::utils::Duration maxLineLength;
+ libcamera::utils::Duration minFrameDuration;
+ libcamera::utils::Duration maxFrameDuration;
+ /* any camera transform *not* reflected already in the camera tuning */
+ libcamera::Transform transform;
+ /* minimum and maximum frame lengths in units of lines */
+ uint32_t minFrameLength;
+ uint32_t maxFrameLength;
+ /* sensitivity of this mode */
+ double sensitivity;
+ /* pixel clock rate */
+ uint64_t pixelRate;
+ /* Mode specific shutter speed limits */
+ libcamera::utils::Duration minShutter;
+ libcamera::utils::Duration maxShutter;
+ /* Mode specific analogue gain limits */
+ double minAnalogueGain;
+ double maxAnalogueGain;
+};
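
As a worked example (all numbers illustrative), a hypothetical 2x2 binned 1080p mode on a 4K sensor could be described as below. Note that binning shows up in the scale factors too, since width * scaleX must equal sensorWidth for an uncropped mode:

    CameraMode mode{};
    mode.bitdepth = 10;
    mode.width = 1920;
    mode.height = 1080;
    mode.sensorWidth = 3840;
    mode.sensorHeight = 2160;
    mode.binX = mode.binY = 2;
    mode.cropX = mode.cropY = 0;     /* full field of view */
    mode.scaleX = mode.scaleY = 2.0; /* 1920 * 2.0 == 3840 */
    mode.noiseFactor = 2.0;          /* illustrative value for 2x2 binning */
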
diff --git a/src/ipa/rpi/controller/ccm_algorithm.h b/src/ipa/rpi/controller/ccm_algorithm.h
new file mode 100644
index 00000000..6678ba75
--- /dev/null
+++ b/src/ipa/rpi/controller/ccm_algorithm.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * CCM (colour correction matrix) control algorithm interface
+ */
+#pragma once
+
+#include "algorithm.h"
+
+namespace RPiController {
+
+class CcmAlgorithm : public Algorithm
+{
+public:
+ CcmAlgorithm(Controller *controller) : Algorithm(controller) {}
+ /* A CCM algorithm must provide the following: */
+ virtual void setSaturation(double saturation) = 0;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/ccm_status.h b/src/ipa/rpi/controller/ccm_status.h
new file mode 100644
index 00000000..c81bcd42
--- /dev/null
+++ b/src/ipa/rpi/controller/ccm_status.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * CCM (colour correction matrix) control algorithm status
+ */
+#pragma once
+
+/* The "ccm" algorithm generates an appropriate colour matrix. */
+
+struct CcmStatus {
+ double matrix[9];
+ double saturation;
+};
diff --git a/src/ipa/rpi/controller/contrast_algorithm.h b/src/ipa/rpi/controller/contrast_algorithm.h
new file mode 100644
index 00000000..2e983350
--- /dev/null
+++ b/src/ipa/rpi/controller/contrast_algorithm.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * contrast (gamma) control algorithm interface
+ */
+#pragma once
+
+#include "algorithm.h"
+
+namespace RPiController {
+
+class ContrastAlgorithm : public Algorithm
+{
+public:
+ ContrastAlgorithm(Controller *controller) : Algorithm(controller) {}
+ /* A contrast algorithm must provide the following: */
+ virtual void setBrightness(double brightness) = 0;
+ virtual void setContrast(double contrast) = 0;
+ virtual void enableCe(bool enable) = 0;
+ virtual void restoreCe() = 0;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/contrast_status.h b/src/ipa/rpi/controller/contrast_status.h
new file mode 100644
index 00000000..7c67f054
--- /dev/null
+++ b/src/ipa/rpi/controller/contrast_status.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * contrast (gamma) control algorithm status
+ */
+#pragma once
+
+#include "pwl.h"
+
+/*
+ * The "contrast" algorithm creates a gamma curve, optionally doing a little bit
+ * of contrast stretching based on the AGC histogram.
+ */
+
+struct ContrastStatus {
+ RPiController::Pwl gammaCurve;
+ double brightness;
+ double contrast;
+};
diff --git a/src/ipa/rpi/controller/controller.cpp b/src/ipa/rpi/controller/controller.cpp
new file mode 100644
index 00000000..e0131018
--- /dev/null
+++ b/src/ipa/rpi/controller/controller.cpp
@@ -0,0 +1,220 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * ISP controller
+ */
+
+#include <assert.h>
+
+#include <libcamera/base/file.h>
+#include <libcamera/base/log.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+#include "algorithm.h"
+#include "controller.h"
+
+using namespace RPiController;
+using namespace libcamera;
+using namespace std::literals::chrono_literals;
+
+LOG_DEFINE_CATEGORY(RPiController)
+
+static const std::map<std::string, Controller::HardwareConfig> HardwareConfigMap = {
+ {
+ "bcm2835",
+ {
+ /*
+ * There are only ever 15 AGC regions computed by the firmware
+ * due to zoning, but the HW defines AGC_REGIONS == 16!
+ */
+			.agcRegions = { 15, 1 },
+			.agcZoneWeights = { 15, 1 },
+ .awbRegions = { 16, 12 },
+ .cacRegions = { 0, 0 },
+ .focusRegions = { 4, 3 },
+ .numHistogramBins = 128,
+ .numGammaPoints = 33,
+ .pipelineWidth = 13,
+ .statsInline = false,
+ .minPixelProcessingTime = 0s,
+ }
+ },
+ {
+ "pisp",
+ {
+ .agcRegions = { 0, 0 },
+ .agcZoneWeights = { 15, 15 },
+ .awbRegions = { 32, 32 },
+ .cacRegions = { 8, 8 },
+ .focusRegions = { 8, 8 },
+ .numHistogramBins = 1024,
+ .numGammaPoints = 64,
+ .pipelineWidth = 16,
+ .statsInline = true,
+
+ /*
+ * The constraint below is on the rate of pixels going
+ * from CSI2 peripheral to ISP-FE (400Mpix/s, plus tiny
+ * overheads per scanline, for which 380Mpix/s is a
+ * conservative bound).
+ *
+ * There is a 64kbit data FIFO before the bottleneck,
+ * which means that in all reasonable cases the
+ * constraint applies at a timescale >= 1 scanline, so
+ * adding horizontal blanking can prevent loss.
+ *
+ * If the backlog were to grow beyond 64kbit during a
+ * single scanline, there could still be loss. This
+ * could happen using 4 lanes at 1.5Gbps at 10bpp with
+ * frames wider than ~16,000 pixels.
+ */
+ .minPixelProcessingTime = 1.0us / 380,
+ }
+ },
+};
+
+Controller::Controller()
+ : switchModeCalled_(false)
+{
+}
+
+Controller::~Controller() {}
+
+int Controller::read(char const *filename)
+{
+ File file(filename);
+ if (!file.open(File::OpenModeFlag::ReadOnly)) {
+ LOG(RPiController, Warning)
+ << "Failed to open tuning file '" << filename << "'";
+ return -EINVAL;
+ }
+
+ std::unique_ptr<YamlObject> root = YamlParser::parse(file);
+ if (!root)
+ return -EINVAL;
+
+ double version = (*root)["version"].get<double>(1.0);
+ target_ = (*root)["target"].get<std::string>("bcm2835");
+
+ if (version < 2.0) {
+ LOG(RPiController, Warning)
+ << "This format of the tuning file will be deprecated soon!"
+ << " Please use the convert_tuning.py utility to update to version 2.0.";
+
+ for (auto const &[key, value] : root->asDict()) {
+ int ret = createAlgorithm(key, value);
+ if (ret)
+ return ret;
+ }
+ } else if (version < 3.0) {
+ if (!root->contains("algorithms")) {
+ LOG(RPiController, Error)
+ << "Tuning file " << filename
+ << " does not have an \"algorithms\" list!";
+ return -EINVAL;
+ }
+
+ for (auto const &rootAlgo : (*root)["algorithms"].asList())
+ for (auto const &[key, value] : rootAlgo.asDict()) {
+ int ret = createAlgorithm(key, value);
+ if (ret)
+ return ret;
+ }
+ } else {
+ LOG(RPiController, Error)
+ << "Unrecognised version " << version
+ << " for the tuning file " << filename;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int Controller::createAlgorithm(const std::string &name, const YamlObject &params)
+{
+ auto it = getAlgorithms().find(name);
+ if (it == getAlgorithms().end()) {
+ LOG(RPiController, Warning)
+ << "No algorithm found for \"" << name << "\"";
+ return 0;
+ }
+
+ Algorithm *algo = (*it->second)(this);
+ int ret = algo->read(params);
+ if (ret)
+ return ret;
+
+ algorithms_.push_back(AlgorithmPtr(algo));
+ return 0;
+}
+
+void Controller::initialise()
+{
+ for (auto &algo : algorithms_)
+ algo->initialise();
+}
+
+void Controller::switchMode(CameraMode const &cameraMode, Metadata *metadata)
+{
+ for (auto &algo : algorithms_)
+ algo->switchMode(cameraMode, metadata);
+ switchModeCalled_ = true;
+}
+
+void Controller::prepare(Metadata *imageMetadata)
+{
+ assert(switchModeCalled_);
+ for (auto &algo : algorithms_)
+ algo->prepare(imageMetadata);
+}
+
+void Controller::process(StatisticsPtr stats, Metadata *imageMetadata)
+{
+ assert(switchModeCalled_);
+ for (auto &algo : algorithms_)
+ algo->process(stats, imageMetadata);
+}
+
+Metadata &Controller::getGlobalMetadata()
+{
+ return globalMetadata_;
+}
+
+Algorithm *Controller::getAlgorithm(std::string const &name) const
+{
+ /*
+ * The passed name must be the entire algorithm name, or must match the
+ * last part of it with a period (.) just before.
+ */
+ size_t nameLen = name.length();
+ for (auto &algo : algorithms_) {
+ char const *algoName = algo->name();
+ size_t algoNameLen = strlen(algoName);
+ if (algoNameLen >= nameLen &&
+ strcasecmp(name.c_str(),
+ algoName + algoNameLen - nameLen) == 0 &&
+ (nameLen == algoNameLen ||
+ algoName[algoNameLen - nameLen - 1] == '.'))
+ return algo.get();
+ }
+ return nullptr;
+}
+
+const std::string &Controller::getTarget() const
+{
+ return target_;
+}
+
+const Controller::HardwareConfig &Controller::getHardwareConfig() const
+{
+ auto cfg = HardwareConfigMap.find(getTarget());
+
+ /*
+ * This really should not happen, the IPA ought to validate the target
+ * on initialisation.
+ */
+ ASSERT(cfg != HardwareConfigMap.end());
+ return cfg->second;
+}
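
The intended calling sequence, pieced together from the methods above (variable names illustrative): read the tuning file once, initialise the algorithms, call switchMode() whenever the sensor mode changes, then run prepare()/process() for every frame:

    RPiController::Controller controller;

    if (controller.read(tuningFile.c_str())) /* path supplied by the IPA */
    	return -EINVAL;
    controller.initialise();

    /* On every mode switch, and at least once before the first frame: */
    controller.switchMode(mode, &globalMetadata);

    /* Per frame: */
    RPiController::Metadata imageMetadata;
    controller.prepare(&imageMetadata);             /* before the ISP runs */
    controller.process(statistics, &imageMetadata); /* once statistics arrive */
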
diff --git a/src/ipa/rpi/controller/controller.h b/src/ipa/rpi/controller/controller.h
new file mode 100644
index 00000000..eff520bd
--- /dev/null
+++ b/src/ipa/rpi/controller/controller.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * ISP controller interface
+ */
+#pragma once
+
+/*
+ * The Controller is simply a container for collecting together a number of
+ * "control algorithms" (such as AWB etc.) and for running them all in a
+ * convenient manner.
+ */
+
+#include <vector>
+#include <string>
+
+#include <libcamera/base/utils.h>
+#include "libcamera/internal/yaml_parser.h"
+
+#include "camera_mode.h"
+#include "device_status.h"
+#include "metadata.h"
+#include "statistics.h"
+
+namespace RPiController {
+
+class Algorithm;
+typedef std::unique_ptr<Algorithm> AlgorithmPtr;
+
+/*
+ * The Controller holds a pointer to some global_metadata, which is how
+ * different controllers and control algorithms within them can exchange
+ * information. The Prepare function returns a pointer to metadata for this
+ * specific image, which should then be passed on to the Process function.
+ */
+
+class Controller
+{
+public:
+ struct HardwareConfig {
+ libcamera::Size agcRegions;
+ libcamera::Size agcZoneWeights;
+ libcamera::Size awbRegions;
+ libcamera::Size cacRegions;
+ libcamera::Size focusRegions;
+ unsigned int numHistogramBins;
+ unsigned int numGammaPoints;
+ unsigned int pipelineWidth;
+ bool statsInline;
+ libcamera::utils::Duration minPixelProcessingTime;
+ };
+
+ Controller();
+ ~Controller();
+ int read(char const *filename);
+ void initialise();
+ void switchMode(CameraMode const &cameraMode, Metadata *metadata);
+ void prepare(Metadata *imageMetadata);
+ void process(StatisticsPtr stats, Metadata *imageMetadata);
+ Metadata &getGlobalMetadata();
+ Algorithm *getAlgorithm(std::string const &name) const;
+ const std::string &getTarget() const;
+ const HardwareConfig &getHardwareConfig() const;
+
+protected:
+ int createAlgorithm(const std::string &name, const libcamera::YamlObject &params);
+
+ Metadata globalMetadata_;
+ std::vector<AlgorithmPtr> algorithms_;
+ bool switchModeCalled_;
+
+private:
+ std::string target_;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/denoise_algorithm.h b/src/ipa/rpi/controller/denoise_algorithm.h
new file mode 100644
index 00000000..b9a2a33c
--- /dev/null
+++ b/src/ipa/rpi/controller/denoise_algorithm.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2021, Raspberry Pi Ltd
+ *
+ * Denoise control algorithm interface
+ */
+#pragma once
+
+#include <string>
+
+#include "algorithm.h"
+
+namespace RPiController {
+
+enum class DenoiseMode { Off, ColourOff, ColourFast, ColourHighQuality };
+
+class DenoiseAlgorithm : public Algorithm
+{
+public:
+ DenoiseAlgorithm(Controller *controller) : Algorithm(controller) {}
+ /* A Denoise algorithm must provide the following: */
+ virtual void setMode(DenoiseMode mode) = 0;
+ /* Some platforms may not be able to define this, so supply a default. */
+ virtual void setConfig([[maybe_unused]] std::string const &name) {}
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/denoise_status.h b/src/ipa/rpi/controller/denoise_status.h
new file mode 100644
index 00000000..eead6086
--- /dev/null
+++ b/src/ipa/rpi/controller/denoise_status.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019-2021, Raspberry Pi Ltd
+ *
+ * Denoise control algorithm status
+ */
+#pragma once
+
+/* This stores the parameters required for Denoise. */
+
+struct DenoiseStatus {
+ double noiseConstant;
+ double noiseSlope;
+ double strength;
+ unsigned int mode;
+};
+
+struct SdnStatus {
+ double noiseConstant;
+ double noiseSlope;
+ double noiseConstant2;
+ double noiseSlope2;
+ double strength;
+};
+
+struct CdnStatus {
+ double strength;
+ double threshold;
+};
+
+struct TdnStatus {
+ double noiseConstant;
+ double noiseSlope;
+ double threshold;
+};
diff --git a/src/ipa/rpi/controller/device_status.cpp b/src/ipa/rpi/controller/device_status.cpp
new file mode 100644
index 00000000..68100137
--- /dev/null
+++ b/src/ipa/rpi/controller/device_status.cpp
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2021, Raspberry Pi Ltd
+ *
+ * device (image sensor) status
+ */
+#include "device_status.h"
+
+using namespace libcamera; /* for the Duration operator<< overload */
+
+std::ostream &operator<<(std::ostream &out, const DeviceStatus &d)
+{
+ out << "Exposure: " << d.shutterSpeed
+ << " Frame length: " << d.frameLength
+ << " Line length: " << d.lineLength
+ << " Gain: " << d.analogueGain;
+
+ if (d.aperture)
+ out << " Aperture: " << *d.aperture;
+
+ if (d.lensPosition)
+ out << " Lens: " << *d.lensPosition;
+
+ if (d.flashIntensity)
+ out << " Flash: " << *d.flashIntensity;
+
+ if (d.sensorTemperature)
+ out << " Temperature: " << *d.sensorTemperature;
+
+ return out;
+}
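
A short usage sketch: the per-frame sensor parameters are collected into a DeviceStatus, typically posted into the image metadata (the "device.status" tag follows the convention of the other status headers), and the overload above makes the structure loggable. The values and the imageMetadata object are illustrative:

    using namespace std::chrono_literals;

    DeviceStatus status;
    status.shutterSpeed = 10ms;
    status.frameLength = 1100;  /* lines */
    status.lineLength = 30us;
    status.analogueGain = 2.0;
    status.lensPosition = 1.5;  /* dioptres, if a lens is fitted */

    imageMetadata.set("device.status", status);
    std::cerr << status << std::endl;
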
diff --git a/src/ipa/raspberrypi/controller/device_status.h b/src/ipa/rpi/controller/device_status.h
index b33f0d09..518f15b5 100644
--- a/src/ipa/raspberrypi/controller/device_status.h
+++ b/src/ipa/rpi/controller/device_status.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019-2021, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019-2021, Raspberry Pi Ltd
*
- * device_status.h - device (image sensor) status
+ * device (image sensor) status
*/
#pragma once
@@ -18,24 +18,26 @@
struct DeviceStatus {
DeviceStatus()
- : shutter_speed(std::chrono::seconds(0)), frame_length(0),
- analogue_gain(0.0)
+ : shutterSpeed(std::chrono::seconds(0)), frameLength(0),
+ lineLength(std::chrono::seconds(0)), analogueGain(0.0)
{
}
friend std::ostream &operator<<(std::ostream &out, const DeviceStatus &d);
/* time shutter is open */
- libcamera::utils::Duration shutter_speed;
+ libcamera::utils::Duration shutterSpeed;
/* frame length given in number of lines */
- uint32_t frame_length;
- double analogue_gain;
- /* 1.0/distance-in-metres, or 0 if unknown */
- std::optional<double> lens_position;
- /* 1/f so that brightness quadruples when this doubles, or 0 if unknown */
+ uint32_t frameLength;
+ /* line length for the current frame */
+ libcamera::utils::Duration lineLength;
+ double analogueGain;
+ /* 1.0/distance-in-metres */
+ std::optional<double> lensPosition;
+ /* 1/f so that brightness quadruples when this doubles */
std::optional<double> aperture;
/* proportional to brightness with 0 = no flash, 1 = maximum flash */
- std::optional<double> flash_intensity;
+ std::optional<double> flashIntensity;
/* Sensor reported temperature value (in degrees) */
- std::optional<double> sensor_temperature;
+ std::optional<double> sensorTemperature;
};
diff --git a/src/ipa/rpi/controller/dpc_status.h b/src/ipa/rpi/controller/dpc_status.h
new file mode 100644
index 00000000..9f30d5d9
--- /dev/null
+++ b/src/ipa/rpi/controller/dpc_status.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * DPC (defective pixel correction) control algorithm status
+ */
+#pragma once
+
+/* The "DPC" algorithm sets defective pixel correction strength. */
+
+struct DpcStatus {
+ int strength; /* 0 = "off", 1 = "normal", 2 = "strong" */
+};
diff --git a/src/ipa/rpi/controller/geq_status.h b/src/ipa/rpi/controller/geq_status.h
new file mode 100644
index 00000000..cb107a48
--- /dev/null
+++ b/src/ipa/rpi/controller/geq_status.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * GEQ (green equalisation) control algorithm status
+ */
+#pragma once
+
+/* The "GEQ" algorithm calculates the green equalisation thresholds */
+
+struct GeqStatus {
+ uint16_t offset;
+ double slope;
+};
diff --git a/src/ipa/rpi/controller/hdr_algorithm.h b/src/ipa/rpi/controller/hdr_algorithm.h
new file mode 100644
index 00000000..b889d8fd
--- /dev/null
+++ b/src/ipa/rpi/controller/hdr_algorithm.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2023, Raspberry Pi Ltd
+ *
+ * HDR control algorithm interface
+ */
+#pragma once
+
+#include <vector>
+
+#include "algorithm.h"
+
+namespace RPiController {
+
+class HdrAlgorithm : public Algorithm
+{
+public:
+ HdrAlgorithm(Controller *controller)
+ : Algorithm(controller) {}
+ /* An HDR algorithm must provide the following: */
+ virtual int setMode(std::string const &modeName) = 0;
+ virtual std::vector<unsigned int> getChannels() const = 0;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/hdr_status.h b/src/ipa/rpi/controller/hdr_status.h
new file mode 100644
index 00000000..a4955778
--- /dev/null
+++ b/src/ipa/rpi/controller/hdr_status.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2023 Raspberry Pi Ltd
+ *
+ * HDR control algorithm status
+ */
+#pragma once
+
+#include <string>
+
+/*
+ * The HDR algorithm process method should post an HdrStatus into the image
+ * metadata under the tag "hdr.status".
+ */
+
+struct HdrStatus {
+ std::string mode;
+ std::string channel;
+};
diff --git a/src/ipa/rpi/controller/histogram.cpp b/src/ipa/rpi/controller/histogram.cpp
new file mode 100644
index 00000000..ba5b25dd
--- /dev/null
+++ b/src/ipa/rpi/controller/histogram.cpp
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * histogram calculations
+ */
+#include <math.h>
+#include <stdio.h>
+
+#include "histogram.h"
+
+using namespace RPiController;
+
+uint64_t Histogram::cumulativeFreq(double bin) const
+{
+ if (bin <= 0)
+ return 0;
+ else if (bin >= bins())
+ return total();
+ int b = (int)bin;
+ return cumulative_[b] +
+ (bin - b) * (cumulative_[b + 1] - cumulative_[b]);
+}
+
+double Histogram::quantile(double q, int first, int last) const
+{
+ if (first == -1)
+ first = 0;
+ if (last == -1)
+ last = cumulative_.size() - 2;
+ assert(first <= last);
+ uint64_t items = q * total();
+ while (first < last) /* binary search to find the right bin */
+ {
+ int middle = (first + last) / 2;
+ if (cumulative_[middle + 1] > items)
+ last = middle; /* between first and middle */
+ else
+ first = middle + 1; /* after middle */
+ }
+ assert(items >= cumulative_[first] && items <= cumulative_[last + 1]);
+ double frac = cumulative_[first + 1] == cumulative_[first] ? 0
+ : (double)(items - cumulative_[first]) /
+ (cumulative_[first + 1] - cumulative_[first]);
+ return first + frac;
+}
+
+double Histogram::interBinMean(double binLo, double binHi) const
+{
+ assert(binHi >= binLo);
+ double sumBinFreq = 0, cumulFreq = 0;
+ for (double binNext = floor(binLo) + 1.0; binNext <= ceil(binHi);
+ binLo = binNext, binNext += 1.0) {
+ int bin = floor(binLo);
+ double freq = (cumulative_[bin + 1] - cumulative_[bin]) *
+ (std::min(binNext, binHi) - binLo);
+ sumBinFreq += bin * freq;
+ cumulFreq += freq;
+ }
+
+ if (cumulFreq == 0) {
+ /* interval had zero width or contained no weight? */
+ return binHi;
+ }
+
+ /* add 0.5 to give an average for bin mid-points */
+ return sumBinFreq / cumulFreq + 0.5;
+}
+
+double Histogram::interQuantileMean(double qLo, double qHi) const
+{
+ assert(qHi >= qLo);
+ double pLo = quantile(qLo);
+ double pHi = quantile(qHi, (int)pLo);
+ return interBinMean(pLo, pHi);
+}
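
A brief usage sketch (bin contents made up): the class is constructed from a raw bin array and then queried for fractional-bin quantiles and inter-quantile means:

    uint32_t bins[8] = { 0, 2, 5, 11, 20, 9, 2, 1 };
    RPiController::Histogram h(bins, 8);

    double median = h.quantile(0.5);              /* fractional bin of the median */
    double mid = h.interQuantileMean(0.25, 0.75); /* mean bin between the quartiles */
    uint64_t below = h.cumulativeFreq(median);    /* roughly half of total() */
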
diff --git a/src/ipa/rpi/controller/histogram.h b/src/ipa/rpi/controller/histogram.h
new file mode 100644
index 00000000..ab4e5e31
--- /dev/null
+++ b/src/ipa/rpi/controller/histogram.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * histogram calculation interface
+ */
+#pragma once
+
+#include <stdint.h>
+#include <vector>
+#include <cassert>
+
+/*
+ * A simple histogram class, for use in particular to find "quantiles" and
+ * averages between "quantiles".
+ */
+
+namespace RPiController {
+
+class Histogram
+{
+public:
+ Histogram()
+ {
+ cumulative_.push_back(0);
+ }
+
+ template<typename T> Histogram(T *histogram, int num)
+ {
+ assert(num);
+ cumulative_.reserve(num + 1);
+ cumulative_.push_back(0);
+ for (int i = 0; i < num; i++)
+ cumulative_.push_back(cumulative_.back() +
+ histogram[i]);
+ }
+ uint32_t bins() const { return cumulative_.size() - 1; }
+ uint64_t total() const { return cumulative_[cumulative_.size() - 1]; }
+ /* Cumulative frequency up to a (fractional) point in a bin. */
+ uint64_t cumulativeFreq(double bin) const;
+ /* Return the mean value between two (fractional) bins. */
+ double interBinMean(double binLo, double binHi) const;
+ /*
+ * Return the (fractional) bin of the point q (0 <= q <= 1) through the
+ * histogram. Optionally provide limits to help.
+ */
+ double quantile(double q, int first = -1, int last = -1) const;
+ /* Return the average histogram bin value between the two quantiles. */
+ double interQuantileMean(double qLo, double qHi) const;
+
+private:
+ std::vector<uint64_t> cumulative_;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/lux_status.h b/src/ipa/rpi/controller/lux_status.h
new file mode 100644
index 00000000..d8729f43
--- /dev/null
+++ b/src/ipa/rpi/controller/lux_status.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * Lux control algorithm status
+ */
+#pragma once
+
+/*
+ * The "lux" algorithm looks at the (AGC) histogram statistics of the frame and
+ * estimates the current lux level of the scene. It does this by a simple ratio
+ * calculation comparing to a reference image that was taken in known conditions
+ * with known statistics and a properly measured lux level. There is a slight
+ * problem with aperture, in that it may be variable without the system knowing
+ * or being aware of it. In this case an external application may set a
+ * "current_aperture" value if it wishes, which would be used in place of the
+ * (presumably meaningless) value in the image metadata.
+ */
+
+struct LuxStatus {
+ double lux;
+ double aperture;
+};
diff --git a/src/ipa/rpi/controller/meson.build b/src/ipa/rpi/controller/meson.build
new file mode 100644
index 00000000..32a4d31c
--- /dev/null
+++ b/src/ipa/rpi/controller/meson.build
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: CC0-1.0
+
+rpi_ipa_controller_sources = files([
+ 'algorithm.cpp',
+ 'controller.cpp',
+ 'device_status.cpp',
+ 'histogram.cpp',
+ 'pwl.cpp',
+ 'rpi/af.cpp',
+ 'rpi/agc.cpp',
+ 'rpi/agc_channel.cpp',
+ 'rpi/alsc.cpp',
+ 'rpi/awb.cpp',
+ 'rpi/black_level.cpp',
+ 'rpi/cac.cpp',
+ 'rpi/ccm.cpp',
+ 'rpi/contrast.cpp',
+ 'rpi/denoise.cpp',
+ 'rpi/dpc.cpp',
+ 'rpi/geq.cpp',
+ 'rpi/hdr.cpp',
+ 'rpi/lux.cpp',
+ 'rpi/noise.cpp',
+ 'rpi/saturation.cpp',
+ 'rpi/sdn.cpp',
+ 'rpi/sharpen.cpp',
+ 'rpi/tonemap.cpp',
+])
+
+rpi_ipa_controller_deps = [
+ libcamera_private,
+]
+
+rpi_ipa_controller_lib = static_library('rpi_ipa_controller', rpi_ipa_controller_sources,
+ dependencies : rpi_ipa_controller_deps)
diff --git a/src/ipa/rpi/controller/metadata.h b/src/ipa/rpi/controller/metadata.h
new file mode 100644
index 00000000..b4650d25
--- /dev/null
+++ b/src/ipa/rpi/controller/metadata.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019-2021, Raspberry Pi Ltd
+ *
+ * general metadata class
+ */
+#pragma once
+
+/* A simple class for carrying arbitrary metadata, for example about an image. */
+
+#include <any>
+#include <map>
+#include <mutex>
+#include <string>
+
+#include <libcamera/base/thread_annotations.h>
+
+namespace RPiController {
+
+class LIBCAMERA_TSA_CAPABILITY("mutex") Metadata
+{
+public:
+ Metadata() = default;
+
+ Metadata(Metadata const &other)
+ {
+ std::scoped_lock otherLock(other.mutex_);
+ data_ = other.data_;
+ }
+
+ Metadata(Metadata &&other)
+ {
+ std::scoped_lock otherLock(other.mutex_);
+ data_ = std::move(other.data_);
+ other.data_.clear();
+ }
+
+ template<typename T>
+ void set(std::string const &tag, T const &value)
+ {
+ std::scoped_lock lock(mutex_);
+ data_[tag] = value;
+ }
+
+ template<typename T>
+ int get(std::string const &tag, T &value) const
+ {
+ std::scoped_lock lock(mutex_);
+ auto it = data_.find(tag);
+ if (it == data_.end())
+ return -1;
+ value = std::any_cast<T>(it->second);
+ return 0;
+ }
+
+ void clear()
+ {
+ std::scoped_lock lock(mutex_);
+ data_.clear();
+ }
+
+ Metadata &operator=(Metadata const &other)
+ {
+ std::scoped_lock lock(mutex_, other.mutex_);
+ data_ = other.data_;
+ return *this;
+ }
+
+ Metadata &operator=(Metadata &&other)
+ {
+ std::scoped_lock lock(mutex_, other.mutex_);
+ data_ = std::move(other.data_);
+ other.data_.clear();
+ return *this;
+ }
+
+ void merge(Metadata &other)
+ {
+ std::scoped_lock lock(mutex_, other.mutex_);
+ data_.merge(other.data_);
+ }
+
+ void mergeCopy(const Metadata &other)
+ {
+ std::scoped_lock lock(mutex_, other.mutex_);
+ /*
+		 * Copy only key/value pairs that are not already present;
+		 * existing entries in this Metadata take precedence.
+ */
+ data_.insert(other.data_.begin(), other.data_.end());
+ }
+
+ template<typename T>
+ T *getLocked(std::string const &tag)
+ {
+ /*
+ * This allows in-place access to the Metadata contents,
+ * for which you should be holding the lock.
+ */
+ auto it = data_.find(tag);
+ if (it == data_.end())
+ return nullptr;
+ return std::any_cast<T>(&it->second);
+ }
+
+ template<typename T>
+ void setLocked(std::string const &tag, T const &value)
+ {
+ /* Use this only if you're holding the lock yourself. */
+ data_[tag] = value;
+ }
+
+ /*
+ * Note: use of (lowercase) lock and unlock means you can create scoped
+ * locks with the standard lock classes.
+ * e.g. std::lock_guard<RPiController::Metadata> lock(metadata)
+ */
+ void lock() LIBCAMERA_TSA_ACQUIRE() { mutex_.lock(); }
+ auto try_lock() LIBCAMERA_TSA_ACQUIRE() { return mutex_.try_lock(); }
+ void unlock() LIBCAMERA_TSA_RELEASE() { mutex_.unlock(); }
+
+private:
+ mutable std::mutex mutex_;
+ std::map<std::string, std::any> data_;
+};
+
+} /* namespace RPiController */
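
Typical usage, mirroring the comment above: plain set()/get() calls take the internal lock per call, while getLocked()/setLocked() are used inside an explicit scoped lock for in-place access. The "agc.status" tag matches the convention documented in agc_status.h:

    RPiController::Metadata metadata;

    AgcStatus agcStatus{};
    metadata.set("agc.status", agcStatus); /* locks internally */

    AgcStatus copy;
    if (metadata.get("agc.status", copy) == 0) {
    	/* copy now holds the stored value */
    }

    {
    	std::lock_guard<RPiController::Metadata> lock(metadata);
    	AgcStatus *status = metadata.getLocked<AgcStatus>("agc.status");
    	if (status)
    		status->analogueGain = 2.0; /* edited in place under the lock */
    }
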
diff --git a/src/ipa/rpi/controller/noise_status.h b/src/ipa/rpi/controller/noise_status.h
new file mode 100644
index 00000000..1919da32
--- /dev/null
+++ b/src/ipa/rpi/controller/noise_status.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * Noise control algorithm status
+ */
+#pragma once
+
+/* The "noise" algorithm stores an estimate of the noise profile for this image. */
+
+struct NoiseStatus {
+ double noiseConstant;
+ double noiseSlope;
+};
diff --git a/src/ipa/rpi/controller/pdaf_data.h b/src/ipa/rpi/controller/pdaf_data.h
new file mode 100644
index 00000000..779b987d
--- /dev/null
+++ b/src/ipa/rpi/controller/pdaf_data.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2022, Raspberry Pi Ltd
+ *
+ * PDAF Metadata
+ */
+#pragma once
+
+#include <stdint.h>
+
+#include "region_stats.h"
+
+namespace RPiController {
+
+struct PdafData {
+ /* Confidence, in arbitrary units */
+ uint16_t conf;
+ /* Phase error, in s16 Q4 format (S.11.4) */
+ int16_t phase;
+};
+
+using PdafRegions = RegionStats<PdafData>;
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/pwl.cpp b/src/ipa/rpi/controller/pwl.cpp
new file mode 100644
index 00000000..e3912376
--- /dev/null
+++ b/src/ipa/rpi/controller/pwl.cpp
@@ -0,0 +1,269 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * piecewise linear functions
+ */
+
+#include <cassert>
+#include <cmath>
+#include <stdexcept>
+
+#include "pwl.h"
+
+using namespace RPiController;
+
+int Pwl::read(const libcamera::YamlObject &params)
+{
+ if (!params.size() || params.size() % 2)
+ return -EINVAL;
+
+ const auto &list = params.asList();
+
+ for (auto it = list.begin(); it != list.end(); it++) {
+ auto x = it->get<double>();
+ if (!x)
+ return -EINVAL;
+ if (it != list.begin() && *x <= points_.back().x)
+ return -EINVAL;
+
+ auto y = (++it)->get<double>();
+ if (!y)
+ return -EINVAL;
+
+ points_.push_back(Point(*x, *y));
+ }
+
+ return 0;
+}
+
+void Pwl::append(double x, double y, const double eps)
+{
+ if (points_.empty() || points_.back().x + eps < x)
+ points_.push_back(Point(x, y));
+}
+
+void Pwl::prepend(double x, double y, const double eps)
+{
+ if (points_.empty() || points_.front().x - eps > x)
+ points_.insert(points_.begin(), Point(x, y));
+}
+
+Pwl::Interval Pwl::domain() const
+{
+ return Interval(points_[0].x, points_[points_.size() - 1].x);
+}
+
+Pwl::Interval Pwl::range() const
+{
+ double lo = points_[0].y, hi = lo;
+ for (auto &p : points_)
+ lo = std::min(lo, p.y), hi = std::max(hi, p.y);
+ return Interval(lo, hi);
+}
+
+bool Pwl::empty() const
+{
+ return points_.empty();
+}
+
+double Pwl::eval(double x, int *spanPtr, bool updateSpan) const
+{
+ int span = findSpan(x, spanPtr && *spanPtr != -1 ? *spanPtr : points_.size() / 2 - 1);
+ if (spanPtr && updateSpan)
+ *spanPtr = span;
+ return points_[span].y +
+ (x - points_[span].x) * (points_[span + 1].y - points_[span].y) /
+ (points_[span + 1].x - points_[span].x);
+}
+
+int Pwl::findSpan(double x, int span) const
+{
+ /*
+ * Pwls are generally small, so linear search may well be faster than
+	 * binary, though we could review this if large Pwls start turning up.
+ */
+ int lastSpan = points_.size() - 2;
+ /*
+ * some algorithms may call us with span pointing directly at the last
+ * control point
+ */
+ span = std::max(0, std::min(lastSpan, span));
+ while (span < lastSpan && x >= points_[span + 1].x)
+ span++;
+ while (span && x < points_[span].x)
+ span--;
+ return span;
+}
+
+Pwl::PerpType Pwl::invert(Point const &xy, Point &perp, int &span,
+ const double eps) const
+{
+ assert(span >= -1);
+ bool prevOffEnd = false;
+ for (span = span + 1; span < (int)points_.size() - 1; span++) {
+ Point spanVec = points_[span + 1] - points_[span];
+ double t = ((xy - points_[span]) % spanVec) / spanVec.len2();
+ if (t < -eps) /* off the start of this span */
+ {
+ if (span == 0) {
+ perp = points_[span];
+ return PerpType::Start;
+ } else if (prevOffEnd) {
+ perp = points_[span];
+ return PerpType::Vertex;
+ }
+ } else if (t > 1 + eps) /* off the end of this span */
+ {
+ if (span == (int)points_.size() - 2) {
+ perp = points_[span + 1];
+ return PerpType::End;
+ }
+ prevOffEnd = true;
+ } else /* a true perpendicular */
+ {
+ perp = points_[span] + spanVec * t;
+ return PerpType::Perpendicular;
+ }
+ }
+ return PerpType::None;
+}
+
+Pwl Pwl::inverse(bool *trueInverse, const double eps) const
+{
+ bool appended = false, prepended = false, neither = false;
+ Pwl inverse;
+
+ for (Point const &p : points_) {
+ if (inverse.empty())
+ inverse.append(p.y, p.x, eps);
+ else if (std::abs(inverse.points_.back().x - p.y) <= eps ||
+ std::abs(inverse.points_.front().x - p.y) <= eps)
+ /* do nothing */;
+ else if (p.y > inverse.points_.back().x) {
+ inverse.append(p.y, p.x, eps);
+ appended = true;
+ } else if (p.y < inverse.points_.front().x) {
+ inverse.prepend(p.y, p.x, eps);
+ prepended = true;
+ } else
+ neither = true;
+ }
+
+ /*
+ * This is not a proper inverse if we found ourselves putting points
+ * onto both ends of the inverse, or if there were points that couldn't
+ * go on either.
+ */
+ if (trueInverse)
+ *trueInverse = !(neither || (appended && prepended));
+
+ return inverse;
+}
+
+Pwl Pwl::compose(Pwl const &other, const double eps) const
+{
+ double thisX = points_[0].x, thisY = points_[0].y;
+ int thisSpan = 0, otherSpan = other.findSpan(thisY, 0);
+ Pwl result({ { thisX, other.eval(thisY, &otherSpan, false) } });
+ while (thisSpan != (int)points_.size() - 1) {
+ double dx = points_[thisSpan + 1].x - points_[thisSpan].x,
+ dy = points_[thisSpan + 1].y - points_[thisSpan].y;
+ if (std::abs(dy) > eps &&
+ otherSpan + 1 < (int)other.points_.size() &&
+ points_[thisSpan + 1].y >=
+ other.points_[otherSpan + 1].x + eps) {
+ /*
+ * next control point in result will be where this
+ * function's y reaches the next span in other
+ */
+ thisX = points_[thisSpan].x +
+ (other.points_[otherSpan + 1].x -
+ points_[thisSpan].y) *
+ dx / dy;
+ thisY = other.points_[++otherSpan].x;
+ } else if (std::abs(dy) > eps && otherSpan > 0 &&
+ points_[thisSpan + 1].y <=
+ other.points_[otherSpan - 1].x - eps) {
+ /*
+ * next control point in result will be where this
+ * function's y reaches the previous span in other
+ */
+ thisX = points_[thisSpan].x +
+ (other.points_[otherSpan + 1].x -
+ points_[thisSpan].y) *
+ dx / dy;
+ thisY = other.points_[--otherSpan].x;
+ } else {
+ /* we stay in the same span in other */
+ thisSpan++;
+ thisX = points_[thisSpan].x,
+ thisY = points_[thisSpan].y;
+ }
+ result.append(thisX, other.eval(thisY, &otherSpan, false),
+ eps);
+ }
+ return result;
+}
+
+void Pwl::map(std::function<void(double x, double y)> f) const
+{
+ for (auto &pt : points_)
+ f(pt.x, pt.y);
+}
+
+void Pwl::map2(Pwl const &pwl0, Pwl const &pwl1,
+ std::function<void(double x, double y0, double y1)> f)
+{
+ int span0 = 0, span1 = 0;
+ double x = std::min(pwl0.points_[0].x, pwl1.points_[0].x);
+ f(x, pwl0.eval(x, &span0, false), pwl1.eval(x, &span1, false));
+ while (span0 < (int)pwl0.points_.size() - 1 ||
+ span1 < (int)pwl1.points_.size() - 1) {
+ if (span0 == (int)pwl0.points_.size() - 1)
+ x = pwl1.points_[++span1].x;
+ else if (span1 == (int)pwl1.points_.size() - 1)
+ x = pwl0.points_[++span0].x;
+ else if (pwl0.points_[span0 + 1].x > pwl1.points_[span1 + 1].x)
+ x = pwl1.points_[++span1].x;
+ else
+ x = pwl0.points_[++span0].x;
+ f(x, pwl0.eval(x, &span0, false), pwl1.eval(x, &span1, false));
+ }
+}
+
+Pwl Pwl::combine(Pwl const &pwl0, Pwl const &pwl1,
+ std::function<double(double x, double y0, double y1)> f,
+ const double eps)
+{
+ Pwl result;
+ map2(pwl0, pwl1, [&](double x, double y0, double y1) {
+ result.append(x, f(x, y0, y1), eps);
+ });
+ return result;
+}
+
+void Pwl::matchDomain(Interval const &domain, bool clip, const double eps)
+{
+ int span = 0;
+ prepend(domain.start, eval(clip ? points_[0].x : domain.start, &span),
+ eps);
+ span = points_.size() - 2;
+ append(domain.end, eval(clip ? points_.back().x : domain.end, &span),
+ eps);
+}
+
+Pwl &Pwl::operator*=(double d)
+{
+ for (auto &pt : points_)
+ pt.y *= d;
+ return *this;
+}
+
+void Pwl::debug(FILE *fp) const
+{
+ fprintf(fp, "Pwl {\n");
+ for (auto &p : points_)
+ fprintf(fp, "\t(%g, %g)\n", p.x, p.y);
+ fprintf(fp, "}\n");
+}
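
A brief sketch of the public interface (knot values made up): a Pwl is a list of knots, eval() interpolates linearly between them, inverse() reports whether the input was monotonic, and compose() chains two functions:

    RPiController::Pwl curve({ { 0.0, 0.0 }, { 0.5, 0.7 }, { 1.0, 1.0 } });

    double y = curve.eval(0.25);                      /* 0.35, by linear interpolation */
    bool proper;
    RPiController::Pwl inv = curve.inverse(&proper);  /* proper == true: monotonic input */
    RPiController::Pwl ident = curve.compose(inv);    /* approximately the identity */
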
diff --git a/src/ipa/rpi/controller/pwl.h b/src/ipa/rpi/controller/pwl.h
new file mode 100644
index 00000000..7d5e7e4d
--- /dev/null
+++ b/src/ipa/rpi/controller/pwl.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * piecewise linear functions interface
+ */
+#pragma once
+
+#include <functional>
+#include <math.h>
+#include <vector>
+
+#include "libcamera/internal/yaml_parser.h"
+
+namespace RPiController {
+
+class Pwl
+{
+public:
+ struct Interval {
+ Interval(double _start, double _end)
+ : start(_start), end(_end)
+ {
+ }
+ double start, end;
+ bool contains(double value)
+ {
+ return value >= start && value <= end;
+ }
+ double clip(double value)
+ {
+ return value < start ? start
+ : (value > end ? end : value);
+ }
+ double len() const { return end - start; }
+ };
+ struct Point {
+ Point() : x(0), y(0) {}
+ Point(double _x, double _y)
+ : x(_x), y(_y) {}
+ double x, y;
+ Point operator-(Point const &p) const
+ {
+ return Point(x - p.x, y - p.y);
+ }
+ Point operator+(Point const &p) const
+ {
+ return Point(x + p.x, y + p.y);
+ }
+ double operator%(Point const &p) const
+ {
+ return x * p.x + y * p.y;
+ }
+ Point operator*(double f) const { return Point(x * f, y * f); }
+ Point operator/(double f) const { return Point(x / f, y / f); }
+ double len2() const { return x * x + y * y; }
+ double len() const { return sqrt(len2()); }
+ };
+ Pwl() {}
+ Pwl(std::vector<Point> const &points) : points_(points) {}
+ int read(const libcamera::YamlObject &params);
+ void append(double x, double y, const double eps = 1e-6);
+ void prepend(double x, double y, const double eps = 1e-6);
+ Interval domain() const;
+ Interval range() const;
+ bool empty() const;
+ /*
+ * Evaluate Pwl, optionally supplying an initial guess for the
+ * "span". The "span" may be optionally be updated. If you want to know
+ * the "span" value but don't have an initial guess you can set it to
+ * -1.
+ */
+ double eval(double x, int *spanPtr = nullptr,
+ bool updateSpan = true) const;
+ /*
+ * Find perpendicular closest to xy, starting from span+1 so you can
+ * call it repeatedly to check for multiple closest points (set span to
+ * -1 on the first call). Also returns "pseudo" perpendiculars; see
+ * PerpType enum.
+ */
+ enum class PerpType {
+ None, /* no perpendicular found */
+ Start, /* start of Pwl is closest point */
+ End, /* end of Pwl is closest point */
+ Vertex, /* vertex of Pwl is closest point */
+ Perpendicular /* true perpendicular found */
+ };
+ PerpType invert(Point const &xy, Point &perp, int &span,
+ const double eps = 1e-6) const;
+ /*
+ * Compute the inverse function. Indicate if it is a proper (true)
+ * inverse, or only a best effort (e.g. input was non-monotonic).
+ */
+ Pwl inverse(bool *trueInverse = nullptr, const double eps = 1e-6) const;
+ /* Compose two Pwls together, doing "this" first and "other" after. */
+ Pwl compose(Pwl const &other, const double eps = 1e-6) const;
+ /* Apply function to (x,y) values at every control point. */
+ void map(std::function<void(double x, double y)> f) const;
+ /*
+ * Apply function to (x, y0, y1) values wherever either Pwl has a
+ * control point.
+ */
+ static void map2(Pwl const &pwl0, Pwl const &pwl1,
+ std::function<void(double x, double y0, double y1)> f);
+ /*
+ * Combine two Pwls, meaning we create a new Pwl where the y values are
+ * given by running f wherever either has a knot.
+ */
+ static Pwl
+ combine(Pwl const &pwl0, Pwl const &pwl1,
+ std::function<double(double x, double y0, double y1)> f,
+ const double eps = 1e-6);
+ /*
+ * Make "this" match (at least) the given domain. Any extension my be
+ * clipped or linear.
+ */
+ void matchDomain(Interval const &domain, bool clip = true,
+ const double eps = 1e-6);
+ Pwl &operator*=(double d);
+ void debug(FILE *fp = stdout) const;
+
+private:
+ int findSpan(double x, int span) const;
+ std::vector<Point> points_;
+};
+
+} /* namespace RPiController */
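
The span-hint protocol documented for eval() above can be sketched as follows (hypothetical example, not part of this patch): passing the previous span back in lets successive lookups at increasing x resume the search rather than restart it.

#include <cstdio>

#include "pwl.h"

using RPiController::Pwl;

int main()
{
	Pwl gamma({ { 0.0, 0.0 }, { 0.25, 0.5 }, { 1.0, 1.0 } });

	int span = -1; /* no initial guess, but do report the span used */
	for (int i = 0; i <= 10; i++) {
		double x = i / 10.0;
		double y = gamma.eval(x, &span); /* span is updated in place */
		printf("%.2f -> %.3f (span %d)\n", x, y, span);
	}

	return 0;
}
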
diff --git a/src/ipa/rpi/controller/region_stats.h b/src/ipa/rpi/controller/region_stats.h
new file mode 100644
index 00000000..c60f7d9a
--- /dev/null
+++ b/src/ipa/rpi/controller/region_stats.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2022, Raspberry Pi Ltd
+ *
+ * Raspberry Pi region based statistics container
+ */
+#pragma once
+
+#include <array>
+#include <stdint.h>
+#include <vector>
+
+#include <libcamera/geometry.h>
+
+namespace RPiController {
+
+template<typename T>
+class RegionStats
+{
+public:
+ struct Region {
+ T val;
+ uint32_t counted;
+ uint32_t uncounted;
+ };
+
+ RegionStats()
+ : size_({}), numFloating_(0), default_({})
+ {
+ }
+
+ void init(const libcamera::Size &size, unsigned int numFloating = 0)
+ {
+ size_ = size;
+ numFloating_ = numFloating;
+ regions_.clear();
+ regions_.resize(size_.width * size_.height + numFloating_);
+ }
+
+ void init(unsigned int num)
+ {
+ size_ = libcamera::Size(num, 1);
+ numFloating_ = 0;
+ regions_.clear();
+ regions_.resize(num);
+ }
+
+ unsigned int numRegions() const
+ {
+ return size_.width * size_.height;
+ }
+
+ unsigned int numFloatingRegions() const
+ {
+ return numFloating_;
+ }
+
+ libcamera::Size size() const
+ {
+ return size_;
+ }
+
+ void set(unsigned int index, const Region &region)
+ {
+ if (index >= numRegions())
+ return;
+ set_(index, region);
+ }
+
+ void set(const libcamera::Point &pos, const Region &region)
+ {
+ set(pos.y * size_.width + pos.x, region);
+ }
+
+ void setFloating(unsigned int index, const Region &region)
+ {
+ if (index >= numFloatingRegions())
+ return;
+ set(numRegions() + index, region);
+ }
+
+ const Region &get(unsigned int index) const
+ {
+ if (index >= numRegions())
+ return default_;
+ return get_(index);
+ }
+
+ const Region &get(const libcamera::Point &pos) const
+ {
+ return get(pos.y * size_.width + pos.x);
+ }
+
+ const Region &getFloating(unsigned int index) const
+ {
+ if (index >= numFloatingRegions())
+ return default_;
+ return get_(numRegions() + index);
+ }
+
+ typename std::vector<Region>::iterator begin() { return regions_.begin(); }
+ typename std::vector<Region>::iterator end() { return regions_.end(); }
+ typename std::vector<Region>::const_iterator begin() const { return regions_.begin(); }
+ typename std::vector<Region>::const_iterator end() const { return regions_.end(); }
+
+private:
+ void set_(unsigned int index, const Region &region)
+ {
+ regions_[index] = region;
+ }
+
+ const Region &get_(unsigned int index) const
+ {
+ return regions_[index];
+ }
+
+ libcamera::Size size_;
+ unsigned int numFloating_;
+ std::vector<Region> regions_;
+ Region default_;
+};
+
+} /* namespace RPiController */
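
A hypothetical usage sketch for the container above (not part of this patch; the uint64_t payload and the 4x3 grid are arbitrary choices for illustration):

#include <cstdint>
#include <cstdio>

#include <libcamera/geometry.h>

#include "region_stats.h"

using namespace RPiController;

int main()
{
	RegionStats<uint64_t> focus;
	focus.init(libcamera::Size(4, 3)); /* 4x3 grid, no floating regions */

	/* Fill one cell; out-of-range indices are silently ignored. */
	focus.set(libcamera::Point(1, 2), { 1234, 100, 0 });

	uint64_t sum = 0;
	for (const auto &region : focus)
		sum += region.val;

	printf("regions=%u sum=%llu\n", focus.numRegions(),
	       (unsigned long long)sum);
	return 0;
}
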
diff --git a/src/ipa/rpi/controller/rpi/af.cpp b/src/ipa/rpi/controller/rpi/af.cpp
new file mode 100644
index 00000000..c5fd8482
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/af.cpp
@@ -0,0 +1,797 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2022-2023, Raspberry Pi Ltd
+ *
+ * Autofocus control algorithm
+ */
+
+#include "af.h"
+
+#include <iomanip>
+#include <math.h>
+#include <stdlib.h>
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/control_ids.h>
+
+using namespace RPiController;
+using namespace libcamera;
+
+LOG_DEFINE_CATEGORY(RPiAf)
+
+#define NAME "rpi.af"
+
+/*
+ * Default values for parameters. All may be overridden in the tuning file.
+ * Many of these values are sensor- or module-dependent; the defaults here
+ * assume IMX708 in a Raspberry Pi V3 camera with the standard lens.
+ *
+ * Here all focus values are in dioptres (1/m). They are converted to hardware
+ * units when written to status.lensSetting or returned from setLensPosition().
+ *
+ * Gain and delay values are relative to the update rate, since much (not all)
+ * of the delay is in the sensor and (for CDAF) ISP, not the lens mechanism;
+ * but note that algorithms are updated at no more than 30 Hz.
+ */
+
+Af::RangeDependentParams::RangeDependentParams()
+ : focusMin(0.0),
+ focusMax(12.0),
+ focusDefault(1.0)
+{
+}
+
+Af::SpeedDependentParams::SpeedDependentParams()
+ : stepCoarse(1.0),
+ stepFine(0.25),
+ contrastRatio(0.75),
+ pdafGain(-0.02),
+ pdafSquelch(0.125),
+ maxSlew(2.0),
+ pdafFrames(20),
+ dropoutFrames(6),
+ stepFrames(4)
+{
+}
+
+Af::CfgParams::CfgParams()
+ : confEpsilon(8),
+ confThresh(16),
+ confClip(512),
+ skipFrames(5),
+ map()
+{
+}
+
+template<typename T>
+static void readNumber(T &dest, const libcamera::YamlObject &params, char const *name)
+{
+ auto value = params[name].get<T>();
+ if (value)
+ dest = *value;
+ else
+ LOG(RPiAf, Warning) << "Missing parameter \"" << name << "\"";
+}
+
+void Af::RangeDependentParams::read(const libcamera::YamlObject &params)
+{
+
+ readNumber<double>(focusMin, params, "min");
+ readNumber<double>(focusMax, params, "max");
+ readNumber<double>(focusDefault, params, "default");
+}
+
+void Af::SpeedDependentParams::read(const libcamera::YamlObject &params)
+{
+ readNumber<double>(stepCoarse, params, "step_coarse");
+ readNumber<double>(stepFine, params, "step_fine");
+ readNumber<double>(contrastRatio, params, "contrast_ratio");
+ readNumber<double>(pdafGain, params, "pdaf_gain");
+ readNumber<double>(pdafSquelch, params, "pdaf_squelch");
+ readNumber<double>(maxSlew, params, "max_slew");
+ readNumber<uint32_t>(pdafFrames, params, "pdaf_frames");
+ readNumber<uint32_t>(dropoutFrames, params, "dropout_frames");
+ readNumber<uint32_t>(stepFrames, params, "step_frames");
+}
+
+int Af::CfgParams::read(const libcamera::YamlObject &params)
+{
+ if (params.contains("ranges")) {
+ auto &rr = params["ranges"];
+
+ if (rr.contains("normal"))
+ ranges[AfRangeNormal].read(rr["normal"]);
+ else
+ LOG(RPiAf, Warning) << "Missing range \"normal\"";
+
+ ranges[AfRangeMacro] = ranges[AfRangeNormal];
+ if (rr.contains("macro"))
+ ranges[AfRangeMacro].read(rr["macro"]);
+
+ ranges[AfRangeFull].focusMin = std::min(ranges[AfRangeNormal].focusMin,
+ ranges[AfRangeMacro].focusMin);
+ ranges[AfRangeFull].focusMax = std::max(ranges[AfRangeNormal].focusMax,
+ ranges[AfRangeMacro].focusMax);
+ ranges[AfRangeFull].focusDefault = ranges[AfRangeNormal].focusDefault;
+ if (rr.contains("full"))
+ ranges[AfRangeFull].read(rr["full"]);
+ } else
+ LOG(RPiAf, Warning) << "No ranges defined";
+
+ if (params.contains("speeds")) {
+ auto &ss = params["speeds"];
+
+ if (ss.contains("normal"))
+ speeds[AfSpeedNormal].read(ss["normal"]);
+ else
+ LOG(RPiAf, Warning) << "Missing speed \"normal\"";
+
+ speeds[AfSpeedFast] = speeds[AfSpeedNormal];
+ if (ss.contains("fast"))
+ speeds[AfSpeedFast].read(ss["fast"]);
+ } else
+ LOG(RPiAf, Warning) << "No speeds defined";
+
+ readNumber<uint32_t>(confEpsilon, params, "conf_epsilon");
+ readNumber<uint32_t>(confThresh, params, "conf_thresh");
+ readNumber<uint32_t>(confClip, params, "conf_clip");
+ readNumber<uint32_t>(skipFrames, params, "skip_frames");
+
+ if (params.contains("map"))
+ map.read(params["map"]);
+ else
+ LOG(RPiAf, Warning) << "No map defined";
+
+ return 0;
+}
+
+void Af::CfgParams::initialise()
+{
+ if (map.empty()) {
+ /* Default mapping from dioptres to hardware setting */
+ static constexpr double DefaultMapX0 = 0.0;
+ static constexpr double DefaultMapY0 = 445.0;
+ static constexpr double DefaultMapX1 = 15.0;
+ static constexpr double DefaultMapY1 = 925.0;
+
+ map.append(DefaultMapX0, DefaultMapY0);
+ map.append(DefaultMapX1, DefaultMapY1);
+ }
+}
+
+/* Af Algorithm class */
+
+static constexpr unsigned MaxWindows = 10;
+
+Af::Af(Controller *controller)
+ : AfAlgorithm(controller),
+ cfg_(),
+ range_(AfRangeNormal),
+ speed_(AfSpeedNormal),
+ mode_(AfAlgorithm::AfModeManual),
+ pauseFlag_(false),
+ statsRegion_(0, 0, 0, 0),
+ windows_(),
+ useWindows_(false),
+ phaseWeights_(),
+ contrastWeights_(),
+ scanState_(ScanState::Idle),
+ initted_(false),
+ ftarget_(-1.0),
+ fsmooth_(-1.0),
+ prevContrast_(0.0),
+ skipCount_(0),
+ stepCount_(0),
+ dropCount_(0),
+ scanMaxContrast_(0.0),
+ scanMinContrast_(1.0e9),
+ scanData_(),
+ reportState_(AfState::Idle)
+{
+ /*
+ * Reserve space for data, to reduce memory fragmentation. It's too early
+ * to query the size of the PDAF (from camera) and Contrast (from ISP)
+ * statistics, but these are plausible upper bounds.
+ */
+ phaseWeights_.w.reserve(16 * 12);
+ contrastWeights_.w.reserve(getHardwareConfig().focusRegions.width *
+ getHardwareConfig().focusRegions.height);
+ scanData_.reserve(32);
+}
+
+Af::~Af()
+{
+}
+
+char const *Af::name() const
+{
+ return NAME;
+}
+
+int Af::read(const libcamera::YamlObject &params)
+{
+ return cfg_.read(params);
+}
+
+void Af::initialise()
+{
+ cfg_.initialise();
+}
+
+void Af::switchMode(CameraMode const &cameraMode, [[maybe_unused]] Metadata *metadata)
+{
+ (void)metadata;
+
+ /* Assume that PDAF and Focus stats grids cover the visible area */
+ statsRegion_.x = (int)cameraMode.cropX;
+ statsRegion_.y = (int)cameraMode.cropY;
+ statsRegion_.width = (unsigned)(cameraMode.width * cameraMode.scaleX);
+ statsRegion_.height = (unsigned)(cameraMode.height * cameraMode.scaleY);
+ LOG(RPiAf, Debug) << "switchMode: statsRegion: "
+ << statsRegion_.x << ','
+ << statsRegion_.y << ','
+ << statsRegion_.width << ','
+ << statsRegion_.height;
+ invalidateWeights();
+
+ if (scanState_ >= ScanState::Coarse && scanState_ < ScanState::Settle) {
+ /*
+ * If a scan was in progress, re-start it, as CDAF statistics
+ * may have changed. Though if the application is just about
+ * to take a still picture, this will not help...
+ */
+ startProgrammedScan();
+ }
+ skipCount_ = cfg_.skipFrames;
+}
+
+void Af::computeWeights(RegionWeights *wgts, unsigned rows, unsigned cols)
+{
+ wgts->rows = rows;
+ wgts->cols = cols;
+ wgts->sum = 0;
+ wgts->w.resize(rows * cols);
+ std::fill(wgts->w.begin(), wgts->w.end(), 0);
+
+ if (rows > 0 && cols > 0 && useWindows_ &&
+ statsRegion_.height >= rows && statsRegion_.width >= cols) {
+ /*
+ * Here we just merge all of the given windows, weighted by area.
+ * \todo Perhaps a better approach might be to find the phase in each
+ * window and choose either the closest or the highest-confidence one?
+ * Ensure weights sum to less than (1<<16). 46080 is a "round number"
+ * below 65536, for better rounding when window size is a simple
+ * fraction of image dimensions.
+ */
+ const unsigned maxCellWeight = 46080u / (MaxWindows * rows * cols);
+ const unsigned cellH = statsRegion_.height / rows;
+ const unsigned cellW = statsRegion_.width / cols;
+ const unsigned cellA = cellH * cellW;
+
+ for (auto &w : windows_) {
+ for (unsigned r = 0; r < rows; ++r) {
+ int y0 = std::max(statsRegion_.y + (int)(cellH * r), w.y);
+ int y1 = std::min(statsRegion_.y + (int)(cellH * (r + 1)),
+ w.y + (int)(w.height));
+ if (y0 >= y1)
+ continue;
+ y1 -= y0;
+ for (unsigned c = 0; c < cols; ++c) {
+ int x0 = std::max(statsRegion_.x + (int)(cellW * c), w.x);
+ int x1 = std::min(statsRegion_.x + (int)(cellW * (c + 1)),
+ w.x + (int)(w.width));
+ if (x0 >= x1)
+ continue;
+ unsigned a = y1 * (x1 - x0);
+ a = (maxCellWeight * a + cellA - 1) / cellA;
+ wgts->w[r * cols + c] += a;
+ wgts->sum += a;
+ }
+ }
+ }
+ }
+
+ if (wgts->sum == 0) {
+ /* Default AF window is the middle 1/2 width of the middle 1/3 height */
+ for (unsigned r = rows / 3; r < rows - rows / 3; ++r) {
+ for (unsigned c = cols / 4; c < cols - cols / 4; ++c) {
+ wgts->w[r * cols + c] = 1;
+ wgts->sum += 1;
+ }
+ }
+ }
+}
+
+void Af::invalidateWeights()
+{
+ phaseWeights_.sum = 0;
+ contrastWeights_.sum = 0;
+}
+
+bool Af::getPhase(PdafRegions const &regions, double &phase, double &conf)
+{
+ libcamera::Size size = regions.size();
+ if (size.height != phaseWeights_.rows || size.width != phaseWeights_.cols ||
+ phaseWeights_.sum == 0) {
+ LOG(RPiAf, Debug) << "Recompute Phase weights " << size.width << 'x' << size.height;
+ computeWeights(&phaseWeights_, size.height, size.width);
+ }
+
+ uint32_t sumWc = 0;
+ int64_t sumWcp = 0;
+ for (unsigned i = 0; i < regions.numRegions(); ++i) {
+ unsigned w = phaseWeights_.w[i];
+ if (w) {
+ const PdafData &data = regions.get(i).val;
+ unsigned c = data.conf;
+ if (c >= cfg_.confThresh) {
+ if (c > cfg_.confClip)
+ c = cfg_.confClip;
+ c -= (cfg_.confThresh >> 2);
+ sumWc += w * c;
+ c -= (cfg_.confThresh >> 2);
+ sumWcp += (int64_t)(w * c) * (int64_t)data.phase;
+ }
+ }
+ }
+
+ if (0 < phaseWeights_.sum && phaseWeights_.sum <= sumWc) {
+ phase = (double)sumWcp / (double)sumWc;
+ conf = (double)sumWc / (double)phaseWeights_.sum;
+ return true;
+ } else {
+ phase = 0.0;
+ conf = 0.0;
+ return false;
+ }
+}
+
+double Af::getContrast(const FocusRegions &focusStats)
+{
+ libcamera::Size size = focusStats.size();
+ if (size.height != contrastWeights_.rows ||
+ size.width != contrastWeights_.cols || contrastWeights_.sum == 0) {
+ LOG(RPiAf, Debug) << "Recompute Contrast weights "
+ << size.width << 'x' << size.height;
+ computeWeights(&contrastWeights_, size.height, size.width);
+ }
+
+ uint64_t sumWc = 0;
+ for (unsigned i = 0; i < focusStats.numRegions(); ++i)
+ sumWc += contrastWeights_.w[i] * focusStats.get(i).val;
+
+ return (contrastWeights_.sum > 0) ? ((double)sumWc / (double)contrastWeights_.sum) : 0.0;
+}
+
+void Af::doPDAF(double phase, double conf)
+{
+ /* Apply loop gain */
+ phase *= cfg_.speeds[speed_].pdafGain;
+
+ if (mode_ == AfModeContinuous) {
+ /*
+ * PDAF in Continuous mode. Scale down lens movement when
+ * delta is small or confidence is low, to suppress wobble.
+ */
+ phase *= conf / (conf + cfg_.confEpsilon);
+ if (std::abs(phase) < cfg_.speeds[speed_].pdafSquelch) {
+ double a = phase / cfg_.speeds[speed_].pdafSquelch;
+ phase *= a * a;
+ }
+ } else {
+ /*
+ * PDAF in triggered-auto mode. Allow early termination when
+ * phase delta is small; scale down lens movements towards
+ * the end of the sequence, to ensure a stable image.
+ */
+ if (stepCount_ >= cfg_.speeds[speed_].stepFrames) {
+ if (std::abs(phase) < cfg_.speeds[speed_].pdafSquelch)
+ stepCount_ = cfg_.speeds[speed_].stepFrames;
+ } else
+ phase *= stepCount_ / cfg_.speeds[speed_].stepFrames;
+ }
+
+ /* Apply slew rate limit. Report failure if out of bounds. */
+ if (phase < -cfg_.speeds[speed_].maxSlew) {
+ phase = -cfg_.speeds[speed_].maxSlew;
+ reportState_ = (ftarget_ <= cfg_.ranges[range_].focusMin) ? AfState::Failed
+ : AfState::Scanning;
+ } else if (phase > cfg_.speeds[speed_].maxSlew) {
+ phase = cfg_.speeds[speed_].maxSlew;
+ reportState_ = (ftarget_ >= cfg_.ranges[range_].focusMax) ? AfState::Failed
+ : AfState::Scanning;
+ } else
+ reportState_ = AfState::Focused;
+
+ ftarget_ = fsmooth_ + phase;
+}
+
+bool Af::earlyTerminationByPhase(double phase)
+{
+ if (scanData_.size() > 0 &&
+ scanData_[scanData_.size() - 1].conf >= cfg_.confEpsilon) {
+ double oldFocus = scanData_[scanData_.size() - 1].focus;
+ double oldPhase = scanData_[scanData_.size() - 1].phase;
+
+ /*
+ * Check that the gradient is finite and has the expected sign;
+ * Interpolate/extrapolate the lens position for zero phase.
+ * Check that the extrapolation is well-conditioned.
+ */
+ if ((ftarget_ - oldFocus) * (phase - oldPhase) > 0.0) {
+ double param = phase / (phase - oldPhase);
+ if (-3.0 <= param && param <= 3.5) {
+ ftarget_ += param * (oldFocus - ftarget_);
+ LOG(RPiAf, Debug) << "ETBP: param=" << param;
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+double Af::findPeak(unsigned i) const
+{
+ double f = scanData_[i].focus;
+
+ if (i > 0 && i + 1 < scanData_.size()) {
+ double dropLo = scanData_[i].contrast - scanData_[i - 1].contrast;
+ double dropHi = scanData_[i].contrast - scanData_[i + 1].contrast;
+ if (0.0 <= dropLo && dropLo < dropHi) {
+ double param = 0.3125 * (1.0 - dropLo / dropHi) * (1.6 - dropLo / dropHi);
+ f += param * (scanData_[i - 1].focus - f);
+ } else if (0.0 <= dropHi && dropHi < dropLo) {
+ double param = 0.3125 * (1.0 - dropHi / dropLo) * (1.6 - dropHi / dropLo);
+ f += param * (scanData_[i + 1].focus - f);
+ }
+ }
+
+ LOG(RPiAf, Debug) << "FindPeak: " << f;
+ return f;
+}
+
+void Af::doScan(double contrast, double phase, double conf)
+{
+ /* Record lens position, contrast and phase values for the current scan */
+ if (scanData_.empty() || contrast > scanMaxContrast_) {
+ scanMaxContrast_ = contrast;
+ scanMaxIndex_ = scanData_.size();
+ }
+ if (contrast < scanMinContrast_)
+ scanMinContrast_ = contrast;
+ scanData_.emplace_back(ScanRecord{ ftarget_, contrast, phase, conf });
+
+ if (scanState_ == ScanState::Coarse) {
+ if (ftarget_ >= cfg_.ranges[range_].focusMax ||
+ contrast < cfg_.speeds[speed_].contrastRatio * scanMaxContrast_) {
+ /*
+			 * Finished coarse scan, or termination based on contrast.
+ * Jump to just after max contrast and start fine scan.
+ */
+ ftarget_ = std::min(ftarget_, findPeak(scanMaxIndex_) +
+ 2.0 * cfg_.speeds[speed_].stepFine);
+ scanState_ = ScanState::Fine;
+ scanData_.clear();
+ } else
+ ftarget_ += cfg_.speeds[speed_].stepCoarse;
+ } else { /* ScanState::Fine */
+ if (ftarget_ <= cfg_.ranges[range_].focusMin || scanData_.size() >= 5 ||
+ contrast < cfg_.speeds[speed_].contrastRatio * scanMaxContrast_) {
+ /*
+ * Finished fine scan, or termination based on contrast.
+ * Use quadratic peak-finding to find best contrast position.
+ */
+ ftarget_ = findPeak(scanMaxIndex_);
+ scanState_ = ScanState::Settle;
+ } else
+ ftarget_ -= cfg_.speeds[speed_].stepFine;
+ }
+
+ stepCount_ = (ftarget_ == fsmooth_) ? 0 : cfg_.speeds[speed_].stepFrames;
+}
+
+void Af::doAF(double contrast, double phase, double conf)
+{
+ /* Skip frames at startup and after sensor mode change */
+ if (skipCount_ > 0) {
+ LOG(RPiAf, Debug) << "SKIP";
+ skipCount_--;
+ return;
+ }
+
+ if (scanState_ == ScanState::Pdaf) {
+ /*
+ * Use PDAF closed-loop control whenever available, in both CAF
+ * mode and (for a limited number of iterations) when triggered.
+ * If PDAF fails (due to poor contrast, noise or large defocus),
+ * fall back to a CDAF-based scan. To avoid "nuisance" scans,
+ * scan only after a number of frames with low PDAF confidence.
+ */
+ if (conf > (dropCount_ ? 1.0 : 0.25) * cfg_.confEpsilon) {
+ doPDAF(phase, conf);
+ if (stepCount_ > 0)
+ stepCount_--;
+ else if (mode_ != AfModeContinuous)
+ scanState_ = ScanState::Idle;
+ dropCount_ = 0;
+ } else if (++dropCount_ == cfg_.speeds[speed_].dropoutFrames)
+ startProgrammedScan();
+ } else if (scanState_ >= ScanState::Coarse && fsmooth_ == ftarget_) {
+ /*
+ * Scanning sequence. This means PDAF has become unavailable.
+ * Allow a delay between steps for CDAF FoM statistics to be
+ * updated, and a "settling time" at the end of the sequence.
+ * [A coarse or fine scan can be abandoned if two PDAF samples
+ * allow direct interpolation of the zero-phase lens position.]
+ */
+ if (stepCount_ > 0)
+ stepCount_--;
+ else if (scanState_ == ScanState::Settle) {
+ if (prevContrast_ >= cfg_.speeds[speed_].contrastRatio * scanMaxContrast_ &&
+ scanMinContrast_ <= cfg_.speeds[speed_].contrastRatio * scanMaxContrast_)
+ reportState_ = AfState::Focused;
+ else
+ reportState_ = AfState::Failed;
+ if (mode_ == AfModeContinuous && !pauseFlag_ &&
+ cfg_.speeds[speed_].dropoutFrames > 0)
+ scanState_ = ScanState::Pdaf;
+ else
+ scanState_ = ScanState::Idle;
+ scanData_.clear();
+ } else if (conf >= cfg_.confEpsilon && earlyTerminationByPhase(phase)) {
+ scanState_ = ScanState::Settle;
+ stepCount_ = (mode_ == AfModeContinuous) ? 0
+ : cfg_.speeds[speed_].stepFrames;
+ } else
+ doScan(contrast, phase, conf);
+ }
+}
+
+void Af::updateLensPosition()
+{
+ if (scanState_ >= ScanState::Pdaf) {
+ ftarget_ = std::clamp(ftarget_,
+ cfg_.ranges[range_].focusMin,
+ cfg_.ranges[range_].focusMax);
+ }
+
+ if (initted_) {
+ /* from a known lens position: apply slew rate limit */
+ fsmooth_ = std::clamp(ftarget_,
+ fsmooth_ - cfg_.speeds[speed_].maxSlew,
+ fsmooth_ + cfg_.speeds[speed_].maxSlew);
+ } else {
+ /* from an unknown position: go straight to target, but add delay */
+ fsmooth_ = ftarget_;
+ initted_ = true;
+ skipCount_ = cfg_.skipFrames;
+ }
+}
+
+void Af::startAF()
+{
+ /* Use PDAF if the tuning file allows it; else CDAF. */
+ if (cfg_.speeds[speed_].dropoutFrames > 0 &&
+ (mode_ == AfModeContinuous || cfg_.speeds[speed_].pdafFrames > 0)) {
+ if (!initted_) {
+ ftarget_ = cfg_.ranges[range_].focusDefault;
+ updateLensPosition();
+ }
+ stepCount_ = (mode_ == AfModeContinuous) ? 0 : cfg_.speeds[speed_].pdafFrames;
+ scanState_ = ScanState::Pdaf;
+ scanData_.clear();
+ dropCount_ = 0;
+ reportState_ = AfState::Scanning;
+ } else
+ startProgrammedScan();
+}
+
+void Af::startProgrammedScan()
+{
+ ftarget_ = cfg_.ranges[range_].focusMin;
+ updateLensPosition();
+ scanState_ = ScanState::Coarse;
+ scanMaxContrast_ = 0.0;
+ scanMinContrast_ = 1.0e9;
+ scanMaxIndex_ = 0;
+ scanData_.clear();
+ stepCount_ = cfg_.speeds[speed_].stepFrames;
+ reportState_ = AfState::Scanning;
+}
+
+void Af::goIdle()
+{
+ scanState_ = ScanState::Idle;
+ reportState_ = AfState::Idle;
+ scanData_.clear();
+}
+
+/*
+ * PDAF phase data are available in prepare(), but CDAF statistics are not
+ * available until process(). We are gambling on the availability of PDAF.
+ * To expedite feedback control using PDAF, issue the V4L2 lens control from
+ * prepare(). Conversely, during scans, we must allow an extra frame delay
+ * between steps, to retrieve CDAF statistics from the previous process()
+ * so we can terminate the scan early without having to change our minds.
+ */
+
+void Af::prepare(Metadata *imageMetadata)
+{
+ /* Initialize for triggered scan or start of CAF mode */
+ if (scanState_ == ScanState::Trigger)
+ startAF();
+
+ if (initted_) {
+ /* Get PDAF from the embedded metadata, and run AF algorithm core */
+ PdafRegions regions;
+ double phase = 0.0, conf = 0.0;
+ double oldFt = ftarget_;
+ double oldFs = fsmooth_;
+ ScanState oldSs = scanState_;
+ uint32_t oldSt = stepCount_;
+ if (imageMetadata->get("pdaf.regions", regions) == 0)
+ getPhase(regions, phase, conf);
+ doAF(prevContrast_, phase, conf);
+ updateLensPosition();
+ LOG(RPiAf, Debug) << std::fixed << std::setprecision(2)
+ << static_cast<unsigned int>(reportState_)
+ << " sst" << static_cast<unsigned int>(oldSs)
+ << "->" << static_cast<unsigned int>(scanState_)
+ << " stp" << oldSt << "->" << stepCount_
+ << " ft" << oldFt << "->" << ftarget_
+ << " fs" << oldFs << "->" << fsmooth_
+ << " cont=" << (int)prevContrast_
+ << " phase=" << (int)phase << " conf=" << (int)conf;
+ }
+
+ /* Report status and produce new lens setting */
+ AfStatus status;
+ if (pauseFlag_)
+ status.pauseState = (scanState_ == ScanState::Idle) ? AfPauseState::Paused
+ : AfPauseState::Pausing;
+ else
+ status.pauseState = AfPauseState::Running;
+
+ if (mode_ == AfModeAuto && scanState_ != ScanState::Idle)
+ status.state = AfState::Scanning;
+ else
+ status.state = reportState_;
+ status.lensSetting = initted_ ? std::optional<int>(cfg_.map.eval(fsmooth_))
+ : std::nullopt;
+ imageMetadata->set("af.status", status);
+}
+
+void Af::process(StatisticsPtr &stats, [[maybe_unused]] Metadata *imageMetadata)
+{
+ (void)imageMetadata;
+ prevContrast_ = getContrast(stats->focusRegions);
+}
+
+/* Controls */
+
+void Af::setRange(AfRange r)
+{
+ LOG(RPiAf, Debug) << "setRange: " << (unsigned)r;
+ if (r < AfAlgorithm::AfRangeMax)
+ range_ = r;
+}
+
+void Af::setSpeed(AfSpeed s)
+{
+ LOG(RPiAf, Debug) << "setSpeed: " << (unsigned)s;
+ if (s < AfAlgorithm::AfSpeedMax) {
+ if (scanState_ == ScanState::Pdaf &&
+ cfg_.speeds[s].pdafFrames > cfg_.speeds[speed_].pdafFrames)
+ stepCount_ += cfg_.speeds[s].pdafFrames - cfg_.speeds[speed_].pdafFrames;
+ speed_ = s;
+ }
+}
+
+void Af::setMetering(bool mode)
+{
+ if (useWindows_ != mode) {
+ useWindows_ = mode;
+ invalidateWeights();
+ }
+}
+
+void Af::setWindows(libcamera::Span<libcamera::Rectangle const> const &wins)
+{
+ windows_.clear();
+ for (auto &w : wins) {
+ LOG(RPiAf, Debug) << "Window: "
+ << w.x << ", "
+ << w.y << ", "
+ << w.width << ", "
+ << w.height;
+ windows_.push_back(w);
+ if (windows_.size() >= MaxWindows)
+ break;
+ }
+
+ if (useWindows_)
+ invalidateWeights();
+}
+
+bool Af::setLensPosition(double dioptres, int *hwpos)
+{
+ bool changed = false;
+
+ if (mode_ == AfModeManual) {
+ LOG(RPiAf, Debug) << "setLensPosition: " << dioptres;
+ ftarget_ = cfg_.map.domain().clip(dioptres);
+ changed = !(initted_ && fsmooth_ == ftarget_);
+ updateLensPosition();
+ }
+
+ if (hwpos)
+ *hwpos = cfg_.map.eval(fsmooth_);
+
+ return changed;
+}
+
+std::optional<double> Af::getLensPosition() const
+{
+ /*
+ * \todo We ought to perform some precise timing here to determine
+ * the current lens position.
+ */
+ return initted_ ? std::optional<double>(fsmooth_) : std::nullopt;
+}
+
+void Af::cancelScan()
+{
+ LOG(RPiAf, Debug) << "cancelScan";
+ if (mode_ == AfModeAuto)
+ goIdle();
+}
+
+void Af::triggerScan()
+{
+ LOG(RPiAf, Debug) << "triggerScan";
+ if (mode_ == AfModeAuto && scanState_ == ScanState::Idle)
+ scanState_ = ScanState::Trigger;
+}
+
+void Af::setMode(AfAlgorithm::AfMode mode)
+{
+ LOG(RPiAf, Debug) << "setMode: " << (unsigned)mode;
+ if (mode_ != mode) {
+ mode_ = mode;
+ pauseFlag_ = false;
+ if (mode == AfModeContinuous)
+ scanState_ = ScanState::Trigger;
+ else if (mode != AfModeAuto || scanState_ < ScanState::Coarse)
+ goIdle();
+ }
+}
+
+AfAlgorithm::AfMode Af::getMode() const
+{
+ return mode_;
+}
+
+void Af::pause(AfAlgorithm::AfPause pause)
+{
+ LOG(RPiAf, Debug) << "pause: " << (unsigned)pause;
+ if (mode_ == AfModeContinuous) {
+ if (pause == AfPauseResume && pauseFlag_) {
+ pauseFlag_ = false;
+ if (scanState_ < ScanState::Coarse)
+ scanState_ = ScanState::Trigger;
+ } else if (pause != AfPauseResume && !pauseFlag_) {
+ pauseFlag_ = true;
+ if (pause == AfPauseImmediate || scanState_ < ScanState::Coarse)
+ goIdle();
+ }
+ }
+}
+
+/* Register algorithm with the system. */
+static Algorithm *create(Controller *controller)
+{
+ return (Algorithm *)new Af(controller);
+}
+static RegisterAlgorithm reg(NAME, &create);
diff --git a/src/ipa/rpi/controller/rpi/af.h b/src/ipa/rpi/controller/rpi/af.h
new file mode 100644
index 00000000..2617e2ac
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/af.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2022-2023, Raspberry Pi Ltd
+ *
+ * Autofocus control algorithm
+ */
+#pragma once
+
+#include "../af_algorithm.h"
+#include "../af_status.h"
+#include "../pdaf_data.h"
+#include "../pwl.h"
+
+/*
+ * This algorithm implements a hybrid of CDAF and PDAF, favouring PDAF.
+ *
+ * Whenever PDAF is available, it is used in a continuous feedback loop.
+ * When triggered in auto mode, we simply enable AF for a limited number
+ * of frames (it may terminate early if the delta becomes small enough).
+ *
+ * When PDAF confidence is low (due e.g. to low contrast or extreme defocus)
+ * or PDAF data are absent, fall back to CDAF with a programmed scan pattern.
+ * A coarse and fine scan are performed, using the ISP's CDAF focus FoM to
+ * estimate the lens position with peak contrast. This is slower due to
+ * extra latency in the ISP, and requires a settling time between steps.
+ *
+ * Some hysteresis is applied to the switch between PDAF and CDAF, to avoid
+ * "nuisance" scans. During each interval where PDAF is not working, only
+ * ONE scan will be performed; CAF cannot track objects using CDAF alone.
+ *
+ */
+
+namespace RPiController {
+
+class Af : public AfAlgorithm
+{
+public:
+ Af(Controller *controller = NULL);
+ ~Af();
+ char const *name() const override;
+ int read(const libcamera::YamlObject &params) override;
+ void initialise() override;
+
+ /* IPA calls */
+ void switchMode(CameraMode const &cameraMode, Metadata *metadata) override;
+ void prepare(Metadata *imageMetadata) override;
+ void process(StatisticsPtr &stats, Metadata *imageMetadata) override;
+
+ /* controls */
+ void setRange(AfRange range) override;
+ void setSpeed(AfSpeed speed) override;
+ void setMetering(bool use_windows) override;
+ void setWindows(libcamera::Span<libcamera::Rectangle const> const &wins) override;
+ void setMode(AfMode mode) override;
+ AfMode getMode() const override;
+ bool setLensPosition(double dioptres, int32_t *hwpos) override;
+ std::optional<double> getLensPosition() const override;
+ void triggerScan() override;
+ void cancelScan() override;
+ void pause(AfPause pause) override;
+
+private:
+ enum class ScanState {
+ Idle = 0,
+ Trigger,
+ Pdaf,
+ Coarse,
+ Fine,
+ Settle
+ };
+
+ struct RangeDependentParams {
+		double focusMin;	/* lower (far) limit in dioptres */
+ double focusMax; /* upper (near) limit in dioptres */
+ double focusDefault; /* default setting ("hyperfocal") */
+
+ RangeDependentParams();
+ void read(const libcamera::YamlObject &params);
+ };
+
+ struct SpeedDependentParams {
+ double stepCoarse; /* used for scans */
+ double stepFine; /* used for scans */
+ double contrastRatio; /* used for scan termination and reporting */
+ double pdafGain; /* coefficient for PDAF feedback loop */
+ double pdafSquelch; /* PDAF stability parameter (device-specific) */
+ double maxSlew; /* limit for lens movement per frame */
+ uint32_t pdafFrames; /* number of iterations when triggered */
+ uint32_t dropoutFrames; /* number of non-PDAF frames to switch to CDAF */
+ uint32_t stepFrames; /* frames to skip in between steps of a scan */
+
+ SpeedDependentParams();
+ void read(const libcamera::YamlObject &params);
+ };
+
+ struct CfgParams {
+ RangeDependentParams ranges[AfRangeMax];
+ SpeedDependentParams speeds[AfSpeedMax];
+ uint32_t confEpsilon; /* PDAF hysteresis threshold (sensor-specific) */
+ uint32_t confThresh; /* PDAF confidence cell min (sensor-specific) */
+ uint32_t confClip; /* PDAF confidence cell max (sensor-specific) */
+		uint32_t skipFrames;	/* frames to skip at start or mode switch */
+ Pwl map; /* converts dioptres -> lens driver position */
+
+ CfgParams();
+ int read(const libcamera::YamlObject &params);
+ void initialise();
+ };
+
+ struct ScanRecord {
+ double focus;
+ double contrast;
+ double phase;
+ double conf;
+ };
+
+ struct RegionWeights {
+ unsigned rows;
+ unsigned cols;
+ uint32_t sum;
+ std::vector<uint16_t> w;
+
+ RegionWeights()
+ : rows(0), cols(0), sum(0), w() {}
+ };
+
+ void computeWeights(RegionWeights *wgts, unsigned rows, unsigned cols);
+ void invalidateWeights();
+ bool getPhase(PdafRegions const &regions, double &phase, double &conf);
+ double getContrast(const FocusRegions &focusStats);
+ void doPDAF(double phase, double conf);
+ bool earlyTerminationByPhase(double phase);
+ double findPeak(unsigned index) const;
+ void doScan(double contrast, double phase, double conf);
+ void doAF(double contrast, double phase, double conf);
+ void updateLensPosition();
+ void startAF();
+ void startProgrammedScan();
+ void goIdle();
+
+ /* Configuration and settings */
+ CfgParams cfg_;
+ AfRange range_;
+ AfSpeed speed_;
+ AfMode mode_;
+ bool pauseFlag_;
+ libcamera::Rectangle statsRegion_;
+ std::vector<libcamera::Rectangle> windows_;
+ bool useWindows_;
+ RegionWeights phaseWeights_;
+ RegionWeights contrastWeights_;
+
+ /* Working state. */
+ ScanState scanState_;
+ bool initted_;
+ double ftarget_, fsmooth_;
+ double prevContrast_;
+ unsigned skipCount_, stepCount_, dropCount_;
+ unsigned scanMaxIndex_;
+ double scanMaxContrast_, scanMinContrast_;
+ std::vector<ScanRecord> scanData_;
+ AfState reportState_;
+};
+
+} /* namespace RPiController */
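
The Pwl map member above converts dioptres to lens driver codes. A hypothetical sketch (not part of this patch, assuming it sits next to af.h so that "../pwl.h" resolves) reproducing the default two-point map installed by CfgParams::initialise() and evaluating it as setLensPosition() and prepare() do via cfg_.map.eval():

#include <cstdio>

#include "../pwl.h"

using RPiController::Pwl;

int main()
{
	/* Default map: 0 dioptres -> code 445, 15 dioptres -> code 925. */
	Pwl map;
	map.append(0.0, 445.0);
	map.append(15.0, 925.0);

	for (double dioptres : { 0.0, 1.0, 5.0, 15.0 })
		printf("%5.1f dpt -> lens code %d\n", dioptres,
		       (int)map.eval(dioptres));
	return 0;
}
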
diff --git a/src/ipa/rpi/controller/rpi/agc.cpp b/src/ipa/rpi/controller/rpi/agc.cpp
new file mode 100644
index 00000000..fcf7aec9
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/agc.cpp
@@ -0,0 +1,338 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * AGC/AEC control algorithm
+ */
+
+#include "agc.h"
+
+#include <libcamera/base/log.h>
+
+#include "../metadata.h"
+
+using namespace RPiController;
+using namespace libcamera;
+using libcamera::utils::Duration;
+using namespace std::literals::chrono_literals;
+
+LOG_DEFINE_CATEGORY(RPiAgc)
+
+#define NAME "rpi.agc"
+
+Agc::Agc(Controller *controller)
+ : AgcAlgorithm(controller),
+ activeChannels_({ 0 }), index_(0)
+{
+}
+
+char const *Agc::name() const
+{
+ return NAME;
+}
+
+int Agc::read(const libcamera::YamlObject &params)
+{
+ /*
+ * When there is only a single channel we can read the old style syntax.
+ * Otherwise we expect a "channels" keyword followed by a list of configurations.
+ */
+ if (!params.contains("channels")) {
+ LOG(RPiAgc, Debug) << "Single channel only";
+ channelTotalExposures_.resize(1, 0s);
+ channelData_.emplace_back();
+ return channelData_.back().channel.read(params, getHardwareConfig());
+ }
+
+ const auto &channels = params["channels"].asList();
+ for (auto ch = channels.begin(); ch != channels.end(); ch++) {
+ LOG(RPiAgc, Debug) << "Read AGC channel";
+ channelData_.emplace_back();
+ int ret = channelData_.back().channel.read(*ch, getHardwareConfig());
+ if (ret)
+ return ret;
+ }
+
+ LOG(RPiAgc, Debug) << "Read " << channelData_.size() << " channel(s)";
+ if (channelData_.empty()) {
+ LOG(RPiAgc, Error) << "No AGC channels provided";
+ return -1;
+ }
+
+ channelTotalExposures_.resize(channelData_.size(), 0s);
+
+ return 0;
+}
+
+int Agc::checkChannel(unsigned int channelIndex) const
+{
+ if (channelIndex >= channelData_.size()) {
+ LOG(RPiAgc, Warning) << "AGC channel " << channelIndex << " not available";
+ return -1;
+ }
+
+ return 0;
+}
+
+void Agc::disableAuto()
+{
+ LOG(RPiAgc, Debug) << "disableAuto";
+
+ /* All channels are enabled/disabled together. */
+ for (auto &data : channelData_)
+ data.channel.disableAuto();
+}
+
+void Agc::enableAuto()
+{
+ LOG(RPiAgc, Debug) << "enableAuto";
+
+ /* All channels are enabled/disabled together. */
+ for (auto &data : channelData_)
+ data.channel.enableAuto();
+}
+
+unsigned int Agc::getConvergenceFrames() const
+{
+ /* If there are n channels, it presumably takes n times as long to converge. */
+ return channelData_[0].channel.getConvergenceFrames() * activeChannels_.size();
+}
+
+std::vector<double> const &Agc::getWeights() const
+{
+ /*
+ * In future the metering weights may be determined differently, making it
+	 * difficult to associate different sets of weights with different channels.
+ * Therefore we shall impose a limitation, at least for now, that all
+ * channels will use the same weights.
+ */
+ return channelData_[0].channel.getWeights();
+}
+
+void Agc::setEv(unsigned int channelIndex, double ev)
+{
+ if (checkChannel(channelIndex))
+ return;
+
+ LOG(RPiAgc, Debug) << "setEv " << ev << " for channel " << channelIndex;
+ channelData_[channelIndex].channel.setEv(ev);
+}
+
+void Agc::setFlickerPeriod(Duration flickerPeriod)
+{
+ LOG(RPiAgc, Debug) << "setFlickerPeriod " << flickerPeriod;
+
+ /* Flicker period will be the same across all channels. */
+ for (auto &data : channelData_)
+ data.channel.setFlickerPeriod(flickerPeriod);
+}
+
+void Agc::setMaxShutter(Duration maxShutter)
+{
+ /* Frame durations will be the same across all channels too. */
+ for (auto &data : channelData_)
+ data.channel.setMaxShutter(maxShutter);
+}
+
+void Agc::setFixedShutter(unsigned int channelIndex, Duration fixedShutter)
+{
+ if (checkChannel(channelIndex))
+ return;
+
+ LOG(RPiAgc, Debug) << "setFixedShutter " << fixedShutter
+ << " for channel " << channelIndex;
+ channelData_[channelIndex].channel.setFixedShutter(fixedShutter);
+}
+
+void Agc::setFixedAnalogueGain(unsigned int channelIndex, double fixedAnalogueGain)
+{
+ if (checkChannel(channelIndex))
+ return;
+
+ LOG(RPiAgc, Debug) << "setFixedAnalogueGain " << fixedAnalogueGain
+ << " for channel " << channelIndex;
+ channelData_[channelIndex].channel.setFixedAnalogueGain(fixedAnalogueGain);
+}
+
+void Agc::setMeteringMode(std::string const &meteringModeName)
+{
+ /* Metering modes will be the same across all channels too. */
+ for (auto &data : channelData_)
+ data.channel.setMeteringMode(meteringModeName);
+}
+
+void Agc::setExposureMode(std::string const &exposureModeName)
+{
+ LOG(RPiAgc, Debug) << "setExposureMode " << exposureModeName;
+
+ /* Exposure mode will be the same across all channels. */
+ for (auto &data : channelData_)
+ data.channel.setExposureMode(exposureModeName);
+}
+
+void Agc::setConstraintMode(std::string const &constraintModeName)
+{
+ LOG(RPiAgc, Debug) << "setConstraintMode " << constraintModeName;
+
+ /* Constraint mode will be the same across all channels. */
+ for (auto &data : channelData_)
+ data.channel.setConstraintMode(constraintModeName);
+}
+
+template<typename T>
+std::ostream &operator<<(std::ostream &os, const std::vector<T> &v)
+{
+ os << "{";
+ for (const auto &e : v)
+ os << " " << e;
+ os << " }";
+ return os;
+}
+
+void Agc::setActiveChannels(const std::vector<unsigned int> &activeChannels)
+{
+ if (activeChannels.empty()) {
+ LOG(RPiAgc, Warning) << "No active AGC channels supplied";
+ return;
+ }
+
+ for (auto index : activeChannels)
+ if (checkChannel(index))
+ return;
+
+ LOG(RPiAgc, Debug) << "setActiveChannels " << activeChannels;
+ activeChannels_ = activeChannels;
+ index_ = 0;
+}
+
+void Agc::switchMode(CameraMode const &cameraMode,
+ Metadata *metadata)
+{
+ /*
+ * We run switchMode on every channel, and then we're going to start over
+ * with the first active channel again which means that this channel's
+ * status needs to be the one we leave in the metadata.
+ */
+ AgcStatus status;
+
+ for (unsigned int channelIndex = 0; channelIndex < channelData_.size(); channelIndex++) {
+ LOG(RPiAgc, Debug) << "switchMode for channel " << channelIndex;
+ channelData_[channelIndex].channel.switchMode(cameraMode, metadata);
+ if (channelIndex == activeChannels_[0])
+ metadata->get("agc.status", status);
+ }
+
+ status.channel = activeChannels_[0];
+ metadata->set("agc.status", status);
+ index_ = 0;
+}
+
+static void getDelayedChannelIndex(Metadata *metadata, const char *message, unsigned int &channelIndex)
+{
+ std::unique_lock<RPiController::Metadata> lock(*metadata);
+ AgcStatus *status = metadata->getLocked<AgcStatus>("agc.delayed_status");
+ if (status)
+ channelIndex = status->channel;
+ else {
+ /* This does happen at startup, otherwise it would be a Warning or Error. */
+ LOG(RPiAgc, Debug) << message;
+ }
+}
+
+static libcamera::utils::Duration
+setCurrentChannelIndexGetExposure(Metadata *metadata, const char *message, unsigned int channelIndex)
+{
+ std::unique_lock<RPiController::Metadata> lock(*metadata);
+ AgcStatus *status = metadata->getLocked<AgcStatus>("agc.status");
+ libcamera::utils::Duration dur = 0s;
+
+ if (status) {
+ status->channel = channelIndex;
+ dur = status->totalExposureValue;
+ } else {
+ /* This does happen at startup, otherwise it would be a Warning or Error. */
+ LOG(RPiAgc, Debug) << message;
+ }
+
+ return dur;
+}
+
+void Agc::prepare(Metadata *imageMetadata)
+{
+ /*
+ * The DeviceStatus in the metadata should be correct for the image we
+ * are processing. The delayed status should tell us what channel this frame
+ * was from, so we will use that channel's prepare method.
+ *
+ * \todo To be honest, there's not much that's stateful in the prepare methods
+ * so we should perhaps re-evaluate whether prepare even needs to be done
+ * "per channel".
+ */
+ unsigned int channelIndex = activeChannels_[0];
+ getDelayedChannelIndex(imageMetadata, "prepare: no delayed status", channelIndex);
+
+ LOG(RPiAgc, Debug) << "prepare for channel " << channelIndex;
+ channelData_[channelIndex].channel.prepare(imageMetadata);
+}
+
+void Agc::process(StatisticsPtr &stats, Metadata *imageMetadata)
+{
+ /*
+ * We want to generate values for the next channel in round robin fashion
+ * (i.e. the channel at location index_ in the activeChannel list), even though
+ * the statistics we have will be for a different channel (which we find
+ * again from the delayed status).
+ */
+
+	/* Generate updated AGC values for the new channel that we are requesting. */
+ unsigned int channelIndex = activeChannels_[index_];
+ AgcChannelData &channelData = channelData_[channelIndex];
+ /* The stats that arrived with this image correspond to the following channel. */
+ unsigned int statsIndex = 0;
+ getDelayedChannelIndex(imageMetadata, "process: no delayed status for stats", statsIndex);
+ LOG(RPiAgc, Debug) << "process for channel " << channelIndex;
+
+ /*
+ * We keep a cache of the most recent DeviceStatus and stats for each channel,
+ * so that we can invoke the next channel's process method with the most up to date
+ * values.
+ */
+ LOG(RPiAgc, Debug) << "Save DeviceStatus and stats for channel " << statsIndex;
+ DeviceStatus deviceStatus;
+ if (imageMetadata->get<DeviceStatus>("device.status", deviceStatus) == 0)
+ channelData_[statsIndex].deviceStatus = deviceStatus;
+ else
+ /* Every frame should have a DeviceStatus. */
+ LOG(RPiAgc, Error) << "process: no device status found";
+ channelData_[statsIndex].statistics = stats;
+
+ /*
+ * Finally fetch the most recent DeviceStatus and stats for the new channel, if both
+ * exist, and call process(). We must make the agc.status metadata record correctly
+ * which channel this is.
+ */
+ StatisticsPtr *statsPtr = &stats;
+ if (channelData.statistics && channelData.deviceStatus) {
+ deviceStatus = *channelData.deviceStatus;
+ statsPtr = &channelData.statistics;
+ } else {
+ /* Can also happen when new channels start. */
+ LOG(RPiAgc, Debug) << "process: channel " << channelIndex << " not seen yet";
+ }
+
+ channelData.channel.process(*statsPtr, deviceStatus, imageMetadata, channelTotalExposures_);
+ auto dur = setCurrentChannelIndexGetExposure(imageMetadata, "process: no AGC status found",
+ channelIndex);
+ if (dur)
+ channelTotalExposures_[channelIndex] = dur;
+
+ /* And onto the next channel for the next call. */
+ index_ = (index_ + 1) % activeChannels_.size();
+}
+
+/* Register algorithm with the system. */
+static Algorithm *create(Controller *controller)
+{
+ return (Algorithm *)new Agc(controller);
+}
+static RegisterAlgorithm reg(NAME, &create);
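
The round-robin channel selection at the end of Agc::process() can be illustrated with a toy standalone example (not part of this patch):

#include <cstdio>
#include <vector>

int main()
{
	/* Same indexing scheme as Agc::process(): index_ walks activeChannels_. */
	std::vector<unsigned int> activeChannels{ 0, 1, 0, 2 };
	unsigned int index = 0;

	for (int frame = 0; frame < 8; frame++) {
		printf("frame %d -> AGC channel %u\n", frame, activeChannels[index]);
		index = (index + 1) % activeChannels.size();
	}
	return 0;
}
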
diff --git a/src/ipa/rpi/controller/rpi/agc.h b/src/ipa/rpi/controller/rpi/agc.h
new file mode 100644
index 00000000..5d056f02
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/agc.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * AGC/AEC control algorithm
+ */
+#pragma once
+
+#include <optional>
+#include <string>
+#include <vector>
+
+#include "../agc_algorithm.h"
+
+#include "agc_channel.h"
+
+namespace RPiController {
+
+struct AgcChannelData {
+ AgcChannel channel;
+ std::optional<DeviceStatus> deviceStatus;
+ StatisticsPtr statistics;
+};
+
+class Agc : public AgcAlgorithm
+{
+public:
+ Agc(Controller *controller);
+ char const *name() const override;
+ int read(const libcamera::YamlObject &params) override;
+ unsigned int getConvergenceFrames() const override;
+ std::vector<double> const &getWeights() const override;
+ void setEv(unsigned int channel, double ev) override;
+ void setFlickerPeriod(libcamera::utils::Duration flickerPeriod) override;
+ void setMaxShutter(libcamera::utils::Duration maxShutter) override;
+ void setFixedShutter(unsigned int channelIndex,
+ libcamera::utils::Duration fixedShutter) override;
+ void setFixedAnalogueGain(unsigned int channelIndex,
+ double fixedAnalogueGain) override;
+ void setMeteringMode(std::string const &meteringModeName) override;
+ void setExposureMode(std::string const &exposureModeName) override;
+	void setConstraintMode(std::string const &constraintModeName) override;
+ void enableAuto() override;
+ void disableAuto() override;
+ void switchMode(CameraMode const &cameraMode, Metadata *metadata) override;
+ void prepare(Metadata *imageMetadata) override;
+ void process(StatisticsPtr &stats, Metadata *imageMetadata) override;
+ void setActiveChannels(const std::vector<unsigned int> &activeChannels) override;
+
+private:
+ int checkChannel(unsigned int channel) const;
+ std::vector<AgcChannelData> channelData_;
+ std::vector<unsigned int> activeChannels_;
+ unsigned int index_; /* index into the activeChannels_ */
+ AgcChannelTotalExposures channelTotalExposures_;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/rpi/agc_channel.cpp b/src/ipa/rpi/controller/rpi/agc_channel.cpp
new file mode 100644
index 00000000..a77ccec3
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/agc_channel.cpp
@@ -0,0 +1,1022 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2023, Raspberry Pi Ltd
+ *
+ * AGC/AEC control algorithm
+ */
+
+#include "agc_channel.h"
+
+#include <algorithm>
+#include <tuple>
+
+#include <libcamera/base/log.h>
+
+#include "../awb_status.h"
+#include "../device_status.h"
+#include "../histogram.h"
+#include "../lux_status.h"
+#include "../metadata.h"
+
+using namespace RPiController;
+using namespace libcamera;
+using libcamera::utils::Duration;
+using namespace std::literals::chrono_literals;
+
+LOG_DECLARE_CATEGORY(RPiAgc)
+
+int AgcMeteringMode::read(const libcamera::YamlObject &params)
+{
+ const YamlObject &yamlWeights = params["weights"];
+
+ for (const auto &p : yamlWeights.asList()) {
+ auto value = p.get<double>();
+ if (!value)
+ return -EINVAL;
+ weights.push_back(*value);
+ }
+
+ return 0;
+}
+
+static std::tuple<int, std::string>
+readMeteringModes(std::map<std::string, AgcMeteringMode> &metering_modes,
+ const libcamera::YamlObject &params)
+{
+ std::string first;
+ int ret;
+
+ for (const auto &[key, value] : params.asDict()) {
+ AgcMeteringMode meteringMode;
+ ret = meteringMode.read(value);
+ if (ret)
+ return { ret, {} };
+
+ metering_modes[key] = std::move(meteringMode);
+ if (first.empty())
+ first = key;
+ }
+
+ return { 0, first };
+}
+
+int AgcExposureMode::read(const libcamera::YamlObject &params)
+{
+ auto value = params["shutter"].getList<double>();
+ if (!value)
+ return -EINVAL;
+ std::transform(value->begin(), value->end(), std::back_inserter(shutter),
+ [](double v) { return v * 1us; });
+
+ value = params["gain"].getList<double>();
+ if (!value)
+ return -EINVAL;
+ gain = std::move(*value);
+
+ if (shutter.size() < 2 || gain.size() < 2) {
+ LOG(RPiAgc, Error)
+ << "AgcExposureMode: must have at least two entries in exposure profile";
+ return -EINVAL;
+ }
+
+ if (shutter.size() != gain.size()) {
+ LOG(RPiAgc, Error)
+ << "AgcExposureMode: expect same number of exposure and gain entries in exposure profile";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static std::tuple<int, std::string>
+readExposureModes(std::map<std::string, AgcExposureMode> &exposureModes,
+ const libcamera::YamlObject &params)
+{
+ std::string first;
+ int ret;
+
+ for (const auto &[key, value] : params.asDict()) {
+ AgcExposureMode exposureMode;
+ ret = exposureMode.read(value);
+ if (ret)
+ return { ret, {} };
+
+ exposureModes[key] = std::move(exposureMode);
+ if (first.empty())
+ first = key;
+ }
+
+ return { 0, first };
+}
+
+int AgcConstraint::read(const libcamera::YamlObject &params)
+{
+ std::string boundString = params["bound"].get<std::string>("");
+ transform(boundString.begin(), boundString.end(),
+ boundString.begin(), ::toupper);
+ if (boundString != "UPPER" && boundString != "LOWER") {
+ LOG(RPiAgc, Error) << "AGC constraint type should be UPPER or LOWER";
+ return -EINVAL;
+ }
+ bound = boundString == "UPPER" ? Bound::UPPER : Bound::LOWER;
+
+ auto value = params["q_lo"].get<double>();
+ if (!value)
+ return -EINVAL;
+ qLo = *value;
+
+ value = params["q_hi"].get<double>();
+ if (!value)
+ return -EINVAL;
+ qHi = *value;
+
+ return yTarget.read(params["y_target"]);
+}
+
+static std::tuple<int, AgcConstraintMode>
+readConstraintMode(const libcamera::YamlObject &params)
+{
+ AgcConstraintMode mode;
+ int ret;
+
+ for (const auto &p : params.asList()) {
+ AgcConstraint constraint;
+ ret = constraint.read(p);
+ if (ret)
+ return { ret, {} };
+
+ mode.push_back(std::move(constraint));
+ }
+
+ return { 0, mode };
+}
+
+static std::tuple<int, std::string>
+readConstraintModes(std::map<std::string, AgcConstraintMode> &constraintModes,
+ const libcamera::YamlObject &params)
+{
+ std::string first;
+ int ret;
+
+ for (const auto &[key, value] : params.asDict()) {
+ std::tie(ret, constraintModes[key]) = readConstraintMode(value);
+ if (ret)
+ return { ret, {} };
+
+ if (first.empty())
+ first = key;
+ }
+
+ return { 0, first };
+}
+
+int AgcChannelConstraint::read(const libcamera::YamlObject &params)
+{
+ auto channelValue = params["channel"].get<unsigned int>();
+ if (!channelValue) {
+ LOG(RPiAgc, Error) << "AGC channel constraint must have a channel";
+ return -EINVAL;
+ }
+ channel = *channelValue;
+
+ std::string boundString = params["bound"].get<std::string>("");
+ transform(boundString.begin(), boundString.end(),
+ boundString.begin(), ::toupper);
+ if (boundString != "UPPER" && boundString != "LOWER") {
+ LOG(RPiAgc, Error) << "AGC channel constraint type should be UPPER or LOWER";
+ return -EINVAL;
+ }
+ bound = boundString == "UPPER" ? Bound::UPPER : Bound::LOWER;
+
+ auto factorValue = params["factor"].get<double>();
+ if (!factorValue) {
+ LOG(RPiAgc, Error) << "AGC channel constraint must have a factor";
+ return -EINVAL;
+ }
+ factor = *factorValue;
+
+ return 0;
+}
+
+static int readChannelConstraints(std::vector<AgcChannelConstraint> &channelConstraints,
+ const libcamera::YamlObject &params)
+{
+ for (const auto &p : params.asList()) {
+ AgcChannelConstraint constraint;
+ int ret = constraint.read(p);
+ if (ret)
+ return ret;
+
+ channelConstraints.push_back(constraint);
+ }
+
+ return 0;
+}
+
+int AgcConfig::read(const libcamera::YamlObject &params)
+{
+ LOG(RPiAgc, Debug) << "AgcConfig";
+ int ret;
+
+ std::tie(ret, defaultMeteringMode) =
+ readMeteringModes(meteringModes, params["metering_modes"]);
+ if (ret)
+ return ret;
+ std::tie(ret, defaultExposureMode) =
+ readExposureModes(exposureModes, params["exposure_modes"]);
+ if (ret)
+ return ret;
+ std::tie(ret, defaultConstraintMode) =
+ readConstraintModes(constraintModes, params["constraint_modes"]);
+ if (ret)
+ return ret;
+
+ if (params.contains("channel_constraints")) {
+ ret = readChannelConstraints(channelConstraints, params["channel_constraints"]);
+ if (ret)
+ return ret;
+ }
+
+ ret = yTarget.read(params["y_target"]);
+ if (ret)
+ return ret;
+
+ speed = params["speed"].get<double>(0.2);
+ startupFrames = params["startup_frames"].get<uint16_t>(10);
+ convergenceFrames = params["convergence_frames"].get<unsigned int>(6);
+ fastReduceThreshold = params["fast_reduce_threshold"].get<double>(0.4);
+ baseEv = params["base_ev"].get<double>(1.0);
+
+ /* Start with quite a low value as ramping up is easier than ramping down. */
+ defaultExposureTime = params["default_exposure_time"].get<double>(1000) * 1us;
+ defaultAnalogueGain = params["default_analogue_gain"].get<double>(1.0);
+
+ stableRegion = params["stable_region"].get<double>(0.02);
+
+ desaturate = params["desaturate"].get<int>(1);
+
+ return 0;
+}
+
+AgcChannel::ExposureValues::ExposureValues()
+ : shutter(0s), analogueGain(0),
+ totalExposure(0s), totalExposureNoDG(0s)
+{
+}
+
+AgcChannel::AgcChannel()
+ : meteringMode_(nullptr), exposureMode_(nullptr), constraintMode_(nullptr),
+ frameCount_(0), lockCount_(0),
+ lastTargetExposure_(0s), ev_(1.0), flickerPeriod_(0s),
+ maxShutter_(0s), fixedShutter_(0s), fixedAnalogueGain_(0.0)
+{
+ /* Set AWB default values in case early frames have no updates in metadata. */
+ awb_.gainR = 1.0;
+ awb_.gainG = 1.0;
+ awb_.gainB = 1.0;
+
+ /*
+	 * Setting status_.totalExposureValue to zero initially tells us
+	 * it's not been calculated yet (i.e. process() hasn't yet run).
+ */
+ status_ = {};
+ status_.ev = ev_;
+}
+
+int AgcChannel::read(const libcamera::YamlObject &params,
+ const Controller::HardwareConfig &hardwareConfig)
+{
+ int ret = config_.read(params);
+ if (ret)
+ return ret;
+
+ const Size &size = hardwareConfig.agcZoneWeights;
+ for (auto const &modes : config_.meteringModes) {
+ if (modes.second.weights.size() != size.width * size.height) {
+ LOG(RPiAgc, Error) << "AgcMeteringMode: Incorrect number of weights";
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * Set the config's defaults (which are the first ones it read) as our
+ * current modes, until someone changes them. (they're all known to
+ * exist at this point)
+ */
+ meteringModeName_ = config_.defaultMeteringMode;
+ meteringMode_ = &config_.meteringModes[meteringModeName_];
+ exposureModeName_ = config_.defaultExposureMode;
+ exposureMode_ = &config_.exposureModes[exposureModeName_];
+ constraintModeName_ = config_.defaultConstraintMode;
+ constraintMode_ = &config_.constraintModes[constraintModeName_];
+ /* Set up the "last shutter/gain" values, in case AGC starts "disabled". */
+ status_.shutterTime = config_.defaultExposureTime;
+ status_.analogueGain = config_.defaultAnalogueGain;
+ return 0;
+}
+
+void AgcChannel::disableAuto()
+{
+ fixedShutter_ = status_.shutterTime;
+ fixedAnalogueGain_ = status_.analogueGain;
+}
+
+void AgcChannel::enableAuto()
+{
+ fixedShutter_ = 0s;
+ fixedAnalogueGain_ = 0;
+}
+
+unsigned int AgcChannel::getConvergenceFrames() const
+{
+ /*
+ * If shutter and gain have been explicitly set, there is no
+ * convergence to happen, so no need to drop any frames - return zero.
+ */
+ if (fixedShutter_ && fixedAnalogueGain_)
+ return 0;
+ else
+ return config_.convergenceFrames;
+}
+
+std::vector<double> const &AgcChannel::getWeights() const
+{
+ /*
+	 * In case someone calls setMeteringMode and then calls this before the
+ * algorithm has run and updated the meteringMode_ pointer.
+ */
+ auto it = config_.meteringModes.find(meteringModeName_);
+ if (it == config_.meteringModes.end())
+ return meteringMode_->weights;
+ return it->second.weights;
+}
+
+void AgcChannel::setEv(double ev)
+{
+ ev_ = ev;
+}
+
+void AgcChannel::setFlickerPeriod(Duration flickerPeriod)
+{
+ flickerPeriod_ = flickerPeriod;
+}
+
+void AgcChannel::setMaxShutter(Duration maxShutter)
+{
+ maxShutter_ = maxShutter;
+}
+
+void AgcChannel::setFixedShutter(Duration fixedShutter)
+{
+ fixedShutter_ = fixedShutter;
+ /* Set this in case someone calls disableAuto() straight after. */
+ status_.shutterTime = limitShutter(fixedShutter_);
+}
+
+void AgcChannel::setFixedAnalogueGain(double fixedAnalogueGain)
+{
+ fixedAnalogueGain_ = fixedAnalogueGain;
+ /* Set this in case someone calls disableAuto() straight after. */
+ status_.analogueGain = limitGain(fixedAnalogueGain);
+}
+
+void AgcChannel::setMeteringMode(std::string const &meteringModeName)
+{
+ meteringModeName_ = meteringModeName;
+}
+
+void AgcChannel::setExposureMode(std::string const &exposureModeName)
+{
+ exposureModeName_ = exposureModeName;
+}
+
+void AgcChannel::setConstraintMode(std::string const &constraintModeName)
+{
+ constraintModeName_ = constraintModeName;
+}
+
+void AgcChannel::switchMode(CameraMode const &cameraMode,
+ Metadata *metadata)
+{
+ /* AGC expects the mode sensitivity always to be non-zero. */
+ ASSERT(cameraMode.sensitivity);
+
+ housekeepConfig();
+
+ /*
+	 * Store the mode in the local state. We must cache the sensitivity
+	 * of the previous mode for the calculations below.
+ */
+ double lastSensitivity = mode_.sensitivity;
+ mode_ = cameraMode;
+
+ Duration fixedShutter = limitShutter(fixedShutter_);
+ if (fixedShutter && fixedAnalogueGain_) {
+ /* We're going to reset the algorithm here with these fixed values. */
+ fetchAwbStatus(metadata);
+ double minColourGain = std::min({ awb_.gainR, awb_.gainG, awb_.gainB, 1.0 });
+ ASSERT(minColourGain != 0.0);
+
+ /* This is the equivalent of computeTargetExposure and applyDigitalGain. */
+ target_.totalExposureNoDG = fixedShutter_ * fixedAnalogueGain_;
+ target_.totalExposure = target_.totalExposureNoDG / minColourGain;
+
+ /* Equivalent of filterExposure. This resets any "history". */
+ filtered_ = target_;
+
+ /* Equivalent of divideUpExposure. */
+ filtered_.shutter = fixedShutter;
+ filtered_.analogueGain = fixedAnalogueGain_;
+ } else if (status_.totalExposureValue) {
+ /*
+ * On a mode switch, various things could happen:
+ * - the exposure profile might change
+ * - a fixed exposure or gain might be set
+ * - the new mode's sensitivity might be different
+ * We cope with the last of these by scaling the target values. After
+ * that we just need to re-divide the exposure/gain according to the
+ * current exposure profile, which takes care of everything else.
+ */
+
+ double ratio = lastSensitivity / cameraMode.sensitivity;
+ target_.totalExposureNoDG *= ratio;
+ target_.totalExposure *= ratio;
+ filtered_.totalExposureNoDG *= ratio;
+ filtered_.totalExposure *= ratio;
+
+ divideUpExposure();
+ } else {
+ /*
+ * We come through here on startup, when at least one of the shutter
+ * or gain has not been fixed. We must still write those values out so
+ * that they will be applied immediately. We supply some arbitrary defaults
+ * for any that weren't set.
+ */
+
+ /* Equivalent of divideUpExposure. */
+ filtered_.shutter = fixedShutter ? fixedShutter : config_.defaultExposureTime;
+ filtered_.analogueGain = fixedAnalogueGain_ ? fixedAnalogueGain_ : config_.defaultAnalogueGain;
+ }
+
+ writeAndFinish(metadata, false);
+}
+
+void AgcChannel::prepare(Metadata *imageMetadata)
+{
+ Duration totalExposureValue = status_.totalExposureValue;
+ AgcStatus delayedStatus;
+ AgcPrepareStatus prepareStatus;
+
+ /* Fetch the AWB status now because AWB also sets it in the prepare method. */
+ fetchAwbStatus(imageMetadata);
+
+ if (!imageMetadata->get("agc.delayed_status", delayedStatus))
+ totalExposureValue = delayedStatus.totalExposureValue;
+
+ prepareStatus.digitalGain = 1.0;
+ prepareStatus.locked = false;
+
+ if (status_.totalExposureValue) {
+ /* Process has run, so we have meaningful values. */
+ DeviceStatus deviceStatus;
+ if (imageMetadata->get("device.status", deviceStatus) == 0) {
+ Duration actualExposure = deviceStatus.shutterSpeed *
+ deviceStatus.analogueGain;
+ if (actualExposure) {
+ double digitalGain = totalExposureValue / actualExposure;
+ LOG(RPiAgc, Debug) << "Want total exposure " << totalExposureValue;
+ /*
+ * Never ask for a gain < 1.0, and also impose
+ * some upper limit. Make it customisable?
+ */
+ prepareStatus.digitalGain = std::max(1.0, std::min(digitalGain, 4.0));
+ LOG(RPiAgc, Debug) << "Actual exposure " << actualExposure;
+ LOG(RPiAgc, Debug) << "Use digitalGain " << prepareStatus.digitalGain;
+ LOG(RPiAgc, Debug) << "Effective exposure "
+ << actualExposure * prepareStatus.digitalGain;
+ /* Decide whether AEC/AGC has converged. */
+ prepareStatus.locked = updateLockStatus(deviceStatus);
+ }
+ } else
+ LOG(RPiAgc, Warning) << "AgcChannel: no device metadata";
+ imageMetadata->set("agc.prepare_status", prepareStatus);
+ }
+}
+
+void AgcChannel::process(StatisticsPtr &stats, DeviceStatus const &deviceStatus,
+ Metadata *imageMetadata,
+ const AgcChannelTotalExposures &channelTotalExposures)
+{
+ frameCount_++;
+ /*
+ * First a little bit of housekeeping, fetching up-to-date settings and
+ * configuration, that kind of thing.
+ */
+ housekeepConfig();
+ /* Get the current exposure values for the frame that's just arrived. */
+ fetchCurrentExposure(deviceStatus);
+ /* Compute the total gain we require relative to the current exposure. */
+ double gain, targetY;
+ computeGain(stats, imageMetadata, gain, targetY);
+ /* Now compute the target (final) exposure which we think we want. */
+ computeTargetExposure(gain);
+ /* The results have to be filtered so as not to change too rapidly. */
+ filterExposure();
+ /*
+ * We may be asked to limit the exposure using other channels. If another channel
+ * determines our upper bound we may want to know this later.
+ */
+ bool channelBound = applyChannelConstraints(channelTotalExposures);
+ /*
+ * Some of the exposure has to be applied as digital gain, so work out
+ * what that is. It also tells us whether it's trying to desaturate the image
+ * more quickly, which can only happen when another channel is not limiting us.
+ */
+ bool desaturate = applyDigitalGain(gain, targetY, channelBound);
+ /*
+ * The last thing is to divide up the exposure value into a shutter time
+ * and analogue gain, according to the current exposure mode.
+ */
+ divideUpExposure();
+ /* Finally advertise what we've done. */
+ writeAndFinish(imageMetadata, desaturate);
+}
+
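+/*
+ * Decide whether AEC/AGC has converged: the lock counter increases while the
+ * device's exposure settings and our target exposure stay within a small error
+ * band of their recent values, and resets if any of them move too far.
+ */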
+bool AgcChannel::updateLockStatus(DeviceStatus const &deviceStatus)
+{
+ const double errorFactor = 0.10; /* make these customisable? */
+ const int maxLockCount = 5;
+ /* Reset "lock count" when we exceed this multiple of errorFactor */
+ const double resetMargin = 1.5;
+
+ /* Add 200us to the exposure time error to allow for line quantisation. */
+ Duration exposureError = lastDeviceStatus_.shutterSpeed * errorFactor + 200us;
+ double gainError = lastDeviceStatus_.analogueGain * errorFactor;
+ Duration targetError = lastTargetExposure_ * errorFactor;
+
+ /*
+ * Note that we don't know the exposure/gain limits of the sensor, so
+ * the values we keep requesting may be unachievable. For this reason
+ * we only insist that we're close to values in the past few frames.
+ */
+ if (deviceStatus.shutterSpeed > lastDeviceStatus_.shutterSpeed - exposureError &&
+ deviceStatus.shutterSpeed < lastDeviceStatus_.shutterSpeed + exposureError &&
+ deviceStatus.analogueGain > lastDeviceStatus_.analogueGain - gainError &&
+ deviceStatus.analogueGain < lastDeviceStatus_.analogueGain + gainError &&
+ status_.targetExposureValue > lastTargetExposure_ - targetError &&
+ status_.targetExposureValue < lastTargetExposure_ + targetError)
+ lockCount_ = std::min(lockCount_ + 1, maxLockCount);
+ else if (deviceStatus.shutterSpeed < lastDeviceStatus_.shutterSpeed - resetMargin * exposureError ||
+ deviceStatus.shutterSpeed > lastDeviceStatus_.shutterSpeed + resetMargin * exposureError ||
+ deviceStatus.analogueGain < lastDeviceStatus_.analogueGain - resetMargin * gainError ||
+ deviceStatus.analogueGain > lastDeviceStatus_.analogueGain + resetMargin * gainError ||
+ status_.targetExposureValue < lastTargetExposure_ - resetMargin * targetError ||
+ status_.targetExposureValue > lastTargetExposure_ + resetMargin * targetError)
+ lockCount_ = 0;
+
+ lastDeviceStatus_ = deviceStatus;
+ lastTargetExposure_ = status_.targetExposureValue;
+
+ LOG(RPiAgc, Debug) << "Lock count updated to " << lockCount_;
+ return lockCount_ == maxLockCount;
+}
+
+void AgcChannel::housekeepConfig()
+{
+ /* First fetch all the up-to-date settings, so no one else has to do it. */
+ status_.ev = ev_;
+ status_.fixedShutter = limitShutter(fixedShutter_);
+ status_.fixedAnalogueGain = fixedAnalogueGain_;
+ status_.flickerPeriod = flickerPeriod_;
+ LOG(RPiAgc, Debug) << "ev " << status_.ev << " fixedShutter "
+ << status_.fixedShutter << " fixedAnalogueGain "
+ << status_.fixedAnalogueGain;
+ /*
+ * Make sure the "mode" pointers point to the up-to-date things, if
+ * they've changed.
+ */
+ if (meteringModeName_ != status_.meteringMode) {
+ auto it = config_.meteringModes.find(meteringModeName_);
+ if (it == config_.meteringModes.end()) {
+ LOG(RPiAgc, Warning) << "No metering mode " << meteringModeName_;
+ meteringModeName_ = status_.meteringMode;
+ } else {
+ meteringMode_ = &it->second;
+ status_.meteringMode = meteringModeName_;
+ }
+ }
+ if (exposureModeName_ != status_.exposureMode) {
+ auto it = config_.exposureModes.find(exposureModeName_);
+ if (it == config_.exposureModes.end()) {
+ LOG(RPiAgc, Warning) << "No exposure profile " << exposureModeName_;
+ exposureModeName_ = status_.exposureMode;
+ } else {
+ exposureMode_ = &it->second;
+ status_.exposureMode = exposureModeName_;
+ }
+ }
+ if (constraintModeName_ != status_.constraintMode) {
+ auto it = config_.constraintModes.find(constraintModeName_);
+ if (it == config_.constraintModes.end()) {
+ LOG(RPiAgc, Warning) << "No constraint list " << constraintModeName_;
+ constraintModeName_ = status_.constraintMode;
+ } else {
+ constraintMode_ = &it->second;
+ status_.constraintMode = constraintModeName_;
+ }
+ }
+ LOG(RPiAgc, Debug) << "exposureMode "
+ << exposureModeName_ << " constraintMode "
+ << constraintModeName_ << " meteringMode "
+ << meteringModeName_;
+}
+
+void AgcChannel::fetchCurrentExposure(DeviceStatus const &deviceStatus)
+{
+ current_.shutter = deviceStatus.shutterSpeed;
+ current_.analogueGain = deviceStatus.analogueGain;
+ current_.totalExposure = 0s; /* this value is unused */
+ current_.totalExposureNoDG = current_.shutter * current_.analogueGain;
+}
+
+void AgcChannel::fetchAwbStatus(Metadata *imageMetadata)
+{
+ if (imageMetadata->get("awb.status", awb_) != 0)
+ LOG(RPiAgc, Debug) << "No AWB status found";
+}
+
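+/*
+ * Estimate the mean luminance (Y) of the image, normalised to the range 0 to 1,
+ * as it would appear with the given gain applied, using either the AGC region
+ * statistics or, failing that, the Y histogram.
+ */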
+static double computeInitialY(StatisticsPtr &stats, AwbStatus const &awb,
+ std::vector<double> &weights, double gain)
+{
+ constexpr uint64_t maxVal = 1 << Statistics::NormalisationFactorPow2;
+
+ /*
+	 * If we have no AGC region stats, but do have a Y histogram, use that
+	 * directly to calculate the mean Y value of the image.
+ */
+ if (!stats->agcRegions.numRegions() && stats->yHist.bins()) {
+ /*
+ * When the gain is applied to the histogram, anything below minBin
+ * will scale up directly with the gain, but anything above that
+ * will saturate into the top bin.
+ */
+ auto &hist = stats->yHist;
+ double minBin = std::min(1.0, 1.0 / gain) * hist.bins();
+ double binMean = hist.interBinMean(0.0, minBin);
+ double numUnsaturated = hist.cumulativeFreq(minBin);
+ /* This term is from all the pixels that won't saturate. */
+ double ySum = binMean * gain * numUnsaturated;
+ /* And add the ones that will saturate. */
+ ySum += (hist.total() - numUnsaturated) * hist.bins();
+ return ySum / hist.total() / hist.bins();
+ }
+
+ ASSERT(weights.size() == stats->agcRegions.numRegions());
+
+ /*
+ * Note that the weights are applied by the IPA to the statistics directly,
+ * before they are given to us here.
+ */
+ double rSum = 0, gSum = 0, bSum = 0, pixelSum = 0;
+ for (unsigned int i = 0; i < stats->agcRegions.numRegions(); i++) {
+ auto &region = stats->agcRegions.get(i);
+ rSum += std::min<double>(region.val.rSum * gain, (maxVal - 1) * region.counted);
+ gSum += std::min<double>(region.val.gSum * gain, (maxVal - 1) * region.counted);
+ bSum += std::min<double>(region.val.bSum * gain, (maxVal - 1) * region.counted);
+ pixelSum += region.counted;
+ }
+ if (pixelSum == 0.0) {
+ LOG(RPiAgc, Warning) << "computeInitialY: pixelSum is zero";
+ return 0;
+ }
+
+ double ySum;
+ /* Factor in the AWB correction if needed. */
+ if (stats->agcStatsPos == Statistics::AgcStatsPos::PreWb) {
+ ySum = rSum * awb.gainR * .299 +
+ gSum * awb.gainG * .587 +
+ bSum * awb.gainB * .114;
+ } else
+ ySum = rSum * .299 + gSum * .587 + bSum * .114;
+
+ return ySum / pixelSum / (1 << 16);
+}
+
+/*
+ * We handle extra gain through EV by adjusting our Y targets. However, you
+ * simply can't monitor histograms once they get very close to (or beyond!)
+ * saturation, so we clamp the Y targets to this value. It does mean that EV
+ * increases don't necessarily do quite what you might expect in certain
+ * (contrived) cases.
+ */
+
+static constexpr double EvGainYTargetLimit = 0.9;
+
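+/*
+ * Compute the gain a constraint asks for: evaluate its Y target at the current
+ * lux level (scaled by the EV gain) and compare it with the inter-quantile mean
+ * of the luminance histogram.
+ */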
+static double constraintComputeGain(AgcConstraint &c, const Histogram &h, double lux,
+ double evGain, double &targetY)
+{
+ targetY = c.yTarget.eval(c.yTarget.domain().clip(lux));
+ targetY = std::min(EvGainYTargetLimit, targetY * evGain);
+ double iqm = h.interQuantileMean(c.qLo, c.qHi);
+ return (targetY * h.bins()) / iqm;
+}
+
+void AgcChannel::computeGain(StatisticsPtr &statistics, Metadata *imageMetadata,
+ double &gain, double &targetY)
+{
+ struct LuxStatus lux = {};
+ lux.lux = 400; /* default lux level to 400 in case no metadata found */
+ if (imageMetadata->get("lux.status", lux) != 0)
+ LOG(RPiAgc, Warning) << "No lux level found";
+ const Histogram &h = statistics->yHist;
+ double evGain = status_.ev * config_.baseEv;
+ /*
+ * The initial gain and target_Y come from some of the regions. After
+ * that we consider the histogram constraints.
+ */
+ targetY = config_.yTarget.eval(config_.yTarget.domain().clip(lux.lux));
+ targetY = std::min(EvGainYTargetLimit, targetY * evGain);
+
+ /*
+ * Do this calculation a few times as brightness increase can be
+ * non-linear when there are saturated regions.
+ */
+ gain = 1.0;
+ for (int i = 0; i < 8; i++) {
+ double initialY = computeInitialY(statistics, awb_, meteringMode_->weights, gain);
+ double extraGain = std::min(10.0, targetY / (initialY + .001));
+ gain *= extraGain;
+ LOG(RPiAgc, Debug) << "Initial Y " << initialY << " target " << targetY
+ << " gives gain " << gain;
+ if (extraGain < 1.01) /* close enough */
+ break;
+ }
+
+ for (auto &c : *constraintMode_) {
+ double newTargetY;
+ double newGain = constraintComputeGain(c, h, lux.lux, evGain, newTargetY);
+ LOG(RPiAgc, Debug) << "Constraint has target_Y "
+ << newTargetY << " giving gain " << newGain;
+ if (c.bound == AgcConstraint::Bound::LOWER && newGain > gain) {
+ LOG(RPiAgc, Debug) << "Lower bound constraint adopted";
+ gain = newGain;
+ targetY = newTargetY;
+ } else if (c.bound == AgcConstraint::Bound::UPPER && newGain < gain) {
+ LOG(RPiAgc, Debug) << "Upper bound constraint adopted";
+ gain = newGain;
+ targetY = newTargetY;
+ }
+ }
+ LOG(RPiAgc, Debug) << "Final gain " << gain << " (target_Y " << targetY << " ev "
+ << status_.ev << " base_ev " << config_.baseEv
+ << ")";
+}
+
+void AgcChannel::computeTargetExposure(double gain)
+{
+ if (status_.fixedShutter && status_.fixedAnalogueGain) {
+ /*
+ * When ag and shutter are both fixed, we need to drive the
+ * total exposure so that we end up with a digital gain of at least
+ * 1/minColourGain. Otherwise we'd desaturate channels causing
+ * white to go cyan or magenta.
+ */
+ double minColourGain = std::min({ awb_.gainR, awb_.gainG, awb_.gainB, 1.0 });
+ ASSERT(minColourGain != 0.0);
+ target_.totalExposure =
+ status_.fixedShutter * status_.fixedAnalogueGain / minColourGain;
+ } else {
+ /*
+ * The statistics reflect the image without digital gain, so the final
+ * total exposure we're aiming for is:
+ */
+ target_.totalExposure = current_.totalExposureNoDG * gain;
+ /* The final target exposure is also limited to what the exposure mode allows. */
+ Duration maxShutter = status_.fixedShutter
+ ? status_.fixedShutter
+ : exposureMode_->shutter.back();
+ maxShutter = limitShutter(maxShutter);
+ Duration maxTotalExposure =
+ maxShutter *
+ (status_.fixedAnalogueGain != 0.0
+ ? status_.fixedAnalogueGain
+ : exposureMode_->gain.back());
+ target_.totalExposure = std::min(target_.totalExposure, maxTotalExposure);
+ }
+ LOG(RPiAgc, Debug) << "Target totalExposure " << target_.totalExposure;
+}
+
+bool AgcChannel::applyChannelConstraints(const AgcChannelTotalExposures &channelTotalExposures)
+{
+ bool channelBound = false;
+ LOG(RPiAgc, Debug)
+ << "Total exposure before channel constraints " << filtered_.totalExposure;
+
+ for (const auto &constraint : config_.channelConstraints) {
+ LOG(RPiAgc, Debug)
+ << "Check constraint: channel " << constraint.channel << " bound "
+ << (constraint.bound == AgcChannelConstraint::Bound::UPPER ? "UPPER" : "LOWER")
+ << " factor " << constraint.factor;
+ if (constraint.channel >= channelTotalExposures.size() ||
+ !channelTotalExposures[constraint.channel]) {
+			LOG(RPiAgc, Debug) << "no such channel or no exposure available - skipped";
+ continue;
+ }
+
+ libcamera::utils::Duration limitExposure =
+ channelTotalExposures[constraint.channel] * constraint.factor;
+ LOG(RPiAgc, Debug) << "Limit exposure " << limitExposure;
+ if ((constraint.bound == AgcChannelConstraint::Bound::UPPER &&
+ filtered_.totalExposure > limitExposure) ||
+ (constraint.bound == AgcChannelConstraint::Bound::LOWER &&
+ filtered_.totalExposure < limitExposure)) {
+ filtered_.totalExposure = limitExposure;
+ LOG(RPiAgc, Debug) << "Constraint applies";
+ channelBound = true;
+ } else
+ LOG(RPiAgc, Debug) << "Constraint does not apply";
+ }
+
+ LOG(RPiAgc, Debug)
+ << "Total exposure after channel constraints " << filtered_.totalExposure;
+
+ return channelBound;
+}
+
+bool AgcChannel::applyDigitalGain(double gain, double targetY, bool channelBound)
+{
+ double minColourGain = std::min({ awb_.gainR, awb_.gainG, awb_.gainB, 1.0 });
+ ASSERT(minColourGain != 0.0);
+ double dg = 1.0 / minColourGain;
+ /*
+ * I think this pipeline subtracts black level and rescales before we
+ * get the stats, so no need to worry about it.
+ */
+ LOG(RPiAgc, Debug) << "after AWB, target dg " << dg << " gain " << gain
+ << " target_Y " << targetY;
+ /*
+ * Finally, if we're trying to reduce exposure but the target_Y is
+ * "close" to 1.0, then the gain computed for that constraint will be
+ * only slightly less than one, because the measured Y can never be
+ * larger than 1.0. When this happens, demand a large digital gain so
+ * that the exposure can be reduced, de-saturating the image much more
+ * quickly (and we then approach the correct value more quickly from
+ * below).
+ */
+ bool desaturate = false;
+ if (config_.desaturate)
+ desaturate = !channelBound &&
+ targetY > config_.fastReduceThreshold && gain < sqrt(targetY);
+ if (desaturate)
+ dg /= config_.fastReduceThreshold;
+ LOG(RPiAgc, Debug) << "Digital gain " << dg << " desaturate? " << desaturate;
+ filtered_.totalExposureNoDG = filtered_.totalExposure / dg;
+ LOG(RPiAgc, Debug) << "Target totalExposureNoDG " << filtered_.totalExposureNoDG;
+ return desaturate;
+}
+
+void AgcChannel::filterExposure()
+{
+ double speed = config_.speed;
+ double stableRegion = config_.stableRegion;
+
+ /*
+ * AGC adapts instantly if both shutter and gain are directly specified
+ * or we're in the startup phase.
+ */
+ if ((status_.fixedShutter && status_.fixedAnalogueGain) ||
+ frameCount_ <= config_.startupFrames)
+ speed = 1.0;
+ if (!filtered_.totalExposure) {
+ filtered_.totalExposure = target_.totalExposure;
+ } else if (filtered_.totalExposure * (1.0 - stableRegion) < target_.totalExposure &&
+ filtered_.totalExposure * (1.0 + stableRegion) > target_.totalExposure) {
+ /* Total exposure must change by more than this or we leave it alone. */
+ } else {
+ /*
+ * If close to the result go faster, to save making so many
+ * micro-adjustments on the way. (Make this customisable?)
+ */
+ if (filtered_.totalExposure < 1.2 * target_.totalExposure &&
+ filtered_.totalExposure > 0.8 * target_.totalExposure)
+ speed = sqrt(speed);
+ filtered_.totalExposure = speed * target_.totalExposure +
+ filtered_.totalExposure * (1.0 - speed);
+ }
+ LOG(RPiAgc, Debug) << "After filtering, totalExposure " << filtered_.totalExposure
+ << " no dg " << filtered_.totalExposureNoDG;
+}
+
+void AgcChannel::divideUpExposure()
+{
+ /*
+ * Sending the fixed shutter/gain cases through the same code may seem
+	 * unnecessary, but it will make more sense when we extend this to cover
+ * variable aperture.
+ */
+ Duration exposureValue = filtered_.totalExposureNoDG;
+ Duration shutterTime;
+ double analogueGain;
+ shutterTime = status_.fixedShutter ? status_.fixedShutter
+ : exposureMode_->shutter[0];
+ shutterTime = limitShutter(shutterTime);
+ analogueGain = status_.fixedAnalogueGain != 0.0 ? status_.fixedAnalogueGain
+ : exposureMode_->gain[0];
+ analogueGain = limitGain(analogueGain);
+ if (shutterTime * analogueGain < exposureValue) {
+ for (unsigned int stage = 1;
+ stage < exposureMode_->gain.size(); stage++) {
+ if (!status_.fixedShutter) {
+ Duration stageShutter =
+ limitShutter(exposureMode_->shutter[stage]);
+ if (stageShutter * analogueGain >= exposureValue) {
+ shutterTime = exposureValue / analogueGain;
+ break;
+ }
+ shutterTime = stageShutter;
+ }
+ if (status_.fixedAnalogueGain == 0.0) {
+ if (exposureMode_->gain[stage] * shutterTime >= exposureValue) {
+ analogueGain = exposureValue / shutterTime;
+ break;
+ }
+ analogueGain = exposureMode_->gain[stage];
+ analogueGain = limitGain(analogueGain);
+ }
+ }
+ }
+ LOG(RPiAgc, Debug) << "Divided up shutter and gain are " << shutterTime << " and "
+ << analogueGain;
+ /*
+ * Finally adjust shutter time for flicker avoidance (require both
+ * shutter and gain not to be fixed).
+ */
+ if (!status_.fixedShutter && !status_.fixedAnalogueGain &&
+ status_.flickerPeriod) {
+ int flickerPeriods = shutterTime / status_.flickerPeriod;
+ if (flickerPeriods) {
+ Duration newShutterTime = flickerPeriods * status_.flickerPeriod;
+ analogueGain *= shutterTime / newShutterTime;
+ /*
+ * We should still not allow the ag to go over the
+ * largest value in the exposure mode. Note that this
+ * may force more of the total exposure into the digital
+ * gain as a side-effect.
+ */
+ analogueGain = std::min(analogueGain, exposureMode_->gain.back());
+ analogueGain = limitGain(analogueGain);
+ shutterTime = newShutterTime;
+ }
+ LOG(RPiAgc, Debug) << "After flicker avoidance, shutter "
+ << shutterTime << " gain " << analogueGain;
+ }
+ filtered_.shutter = shutterTime;
+ filtered_.analogueGain = analogueGain;
+}
+
+void AgcChannel::writeAndFinish(Metadata *imageMetadata, bool desaturate)
+{
+ status_.totalExposureValue = filtered_.totalExposure;
+ status_.targetExposureValue = desaturate ? 0s : target_.totalExposure;
+ status_.shutterTime = filtered_.shutter;
+ status_.analogueGain = filtered_.analogueGain;
+ /*
+ * Write to metadata as well, in case anyone wants to update the camera
+ * immediately.
+ */
+ imageMetadata->set("agc.status", status_);
+ LOG(RPiAgc, Debug) << "Output written, total exposure requested is "
+ << filtered_.totalExposure;
+ LOG(RPiAgc, Debug) << "Camera exposure update: shutter time " << filtered_.shutter
+ << " analogue gain " << filtered_.analogueGain;
+}
+
+Duration AgcChannel::limitShutter(Duration shutter)
+{
+ /*
+	 * shutter == 0 is a special case meaning "no fixed shutter", and must
+	 * pass through unchanged.
+ */
+ if (!shutter)
+ return shutter;
+
+ shutter = std::clamp(shutter, mode_.minShutter, maxShutter_);
+ return shutter;
+}
+
+double AgcChannel::limitGain(double gain) const
+{
+ /*
+	 * Only limit the lower bound of the gain value to what the sensor allows.
+	 * The upper bound on analogue gain will be made up with additional digital
+	 * gain applied by the ISP.
+	 *
+	 * gain == 0.0 is a special case meaning "no fixed gain", and must pass
+	 * through unchanged.
+ */
+ if (!gain)
+ return gain;
+
+ gain = std::max(gain, mode_.minAnalogueGain);
+ return gain;
+}
diff --git a/src/ipa/rpi/controller/rpi/agc_channel.h b/src/ipa/rpi/controller/rpi/agc_channel.h
new file mode 100644
index 00000000..99033e23
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/agc_channel.h
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2023, Raspberry Pi Ltd
+ *
+ * AGC/AEC control algorithm
+ */
+#pragma once
+
+#include <map>
+#include <string>
+#include <vector>
+
+#include <libcamera/base/utils.h>
+
+#include "../agc_status.h"
+#include "../awb_status.h"
+#include "../controller.h"
+#include "../pwl.h"
+
+/* This is our implementation of AGC. */
+
+namespace RPiController {
+
+using AgcChannelTotalExposures = std::vector<libcamera::utils::Duration>;
+
+struct AgcMeteringMode {
+ std::vector<double> weights;
+ int read(const libcamera::YamlObject &params);
+};
+
+struct AgcExposureMode {
+ std::vector<libcamera::utils::Duration> shutter;
+ std::vector<double> gain;
+ int read(const libcamera::YamlObject &params);
+};
+
+struct AgcConstraint {
+ enum class Bound { LOWER = 0,
+ UPPER = 1 };
+ Bound bound;
+ double qLo;
+ double qHi;
+ Pwl yTarget;
+ int read(const libcamera::YamlObject &params);
+};
+
+typedef std::vector<AgcConstraint> AgcConstraintMode;
+
+struct AgcChannelConstraint {
+ enum class Bound { LOWER = 0,
+ UPPER = 1 };
+ Bound bound;
+ unsigned int channel;
+ double factor;
+ int read(const libcamera::YamlObject &params);
+};
+
+struct AgcConfig {
+ int read(const libcamera::YamlObject &params);
+ std::map<std::string, AgcMeteringMode> meteringModes;
+ std::map<std::string, AgcExposureMode> exposureModes;
+ std::map<std::string, AgcConstraintMode> constraintModes;
+ std::vector<AgcChannelConstraint> channelConstraints;
+ Pwl yTarget;
+ double speed;
+ uint16_t startupFrames;
+ unsigned int convergenceFrames;
+ double maxChange;
+ double minChange;
+ double fastReduceThreshold;
+ double speedUpThreshold;
+ std::string defaultMeteringMode;
+ std::string defaultExposureMode;
+ std::string defaultConstraintMode;
+ double baseEv;
+ libcamera::utils::Duration defaultExposureTime;
+ double defaultAnalogueGain;
+ double stableRegion;
+ bool desaturate;
+};
+
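+/*
+ * A single AGC/AEC channel. Each channel runs its own metering, exposure and
+ * constraint modes; the total exposures of other channels can bound its own
+ * through the AgcChannelConstraints in the configuration.
+ */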
+class AgcChannel
+{
+public:
+ AgcChannel();
+ int read(const libcamera::YamlObject &params,
+ const Controller::HardwareConfig &hardwareConfig);
+ unsigned int getConvergenceFrames() const;
+ std::vector<double> const &getWeights() const;
+ void setEv(double ev);
+ void setFlickerPeriod(libcamera::utils::Duration flickerPeriod);
+ void setMaxShutter(libcamera::utils::Duration maxShutter);
+ void setFixedShutter(libcamera::utils::Duration fixedShutter);
+ void setFixedAnalogueGain(double fixedAnalogueGain);
+ void setMeteringMode(std::string const &meteringModeName);
+ void setExposureMode(std::string const &exposureModeName);
+	void setConstraintMode(std::string const &constraintModeName);
+ void enableAuto();
+ void disableAuto();
+ void switchMode(CameraMode const &cameraMode, Metadata *metadata);
+ void prepare(Metadata *imageMetadata);
+ void process(StatisticsPtr &stats, DeviceStatus const &deviceStatus, Metadata *imageMetadata,
+ const AgcChannelTotalExposures &channelTotalExposures);
+
+private:
+ bool updateLockStatus(DeviceStatus const &deviceStatus);
+ AgcConfig config_;
+ void housekeepConfig();
+ void fetchCurrentExposure(DeviceStatus const &deviceStatus);
+ void fetchAwbStatus(Metadata *imageMetadata);
+ void computeGain(StatisticsPtr &statistics, Metadata *imageMetadata,
+ double &gain, double &targetY);
+ void computeTargetExposure(double gain);
+ void filterExposure();
+ bool applyChannelConstraints(const AgcChannelTotalExposures &channelTotalExposures);
+ bool applyDigitalGain(double gain, double targetY, bool channelBound);
+ void divideUpExposure();
+ void writeAndFinish(Metadata *imageMetadata, bool desaturate);
+ libcamera::utils::Duration limitShutter(libcamera::utils::Duration shutter);
+ double limitGain(double gain) const;
+ AgcMeteringMode *meteringMode_;
+ AgcExposureMode *exposureMode_;
+ AgcConstraintMode *constraintMode_;
+ CameraMode mode_;
+ uint64_t frameCount_;
+ AwbStatus awb_;
+ struct ExposureValues {
+ ExposureValues();
+
+ libcamera::utils::Duration shutter;
+ double analogueGain;
+ libcamera::utils::Duration totalExposure;
+ libcamera::utils::Duration totalExposureNoDG; /* without digital gain */
+ };
+ ExposureValues current_; /* values for the current frame */
+ ExposureValues target_; /* calculate the values we want here */
+ ExposureValues filtered_; /* these values are filtered towards target */
+ AgcStatus status_;
+ int lockCount_;
+ DeviceStatus lastDeviceStatus_;
+ libcamera::utils::Duration lastTargetExposure_;
+	/* Below here are the "settings" that applications can change. */
+ std::string meteringModeName_;
+ std::string exposureModeName_;
+ std::string constraintModeName_;
+ double ev_;
+ libcamera::utils::Duration flickerPeriod_;
+ libcamera::utils::Duration maxShutter_;
+ libcamera::utils::Duration fixedShutter_;
+ double fixedAnalogueGain_;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/rpi/alsc.cpp b/src/ipa/rpi/controller/rpi/alsc.cpp
new file mode 100644
index 00000000..67029fc3
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/alsc.cpp
@@ -0,0 +1,867 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * ALSC (auto lens shading correction) control algorithm
+ */
+
+#include <algorithm>
+#include <functional>
+#include <math.h>
+#include <numeric>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/span.h>
+
+#include "../awb_status.h"
+#include "alsc.h"
+
+/* Raspberry Pi ALSC (Auto Lens Shading Correction) algorithm. */
+
+using namespace RPiController;
+using namespace libcamera;
+
+LOG_DEFINE_CATEGORY(RPiAlsc)
+
+#define NAME "rpi.alsc"
+
+static const double InsufficientData = -1.0;
+
+Alsc::Alsc(Controller *controller)
+ : Algorithm(controller)
+{
+ asyncAbort_ = asyncStart_ = asyncStarted_ = asyncFinished_ = false;
+ asyncThread_ = std::thread(std::bind(&Alsc::asyncFunc, this));
+}
+
+Alsc::~Alsc()
+{
+ {
+ std::lock_guard<std::mutex> lock(mutex_);
+ asyncAbort_ = true;
+ }
+ asyncSignal_.notify_one();
+ asyncThread_.join();
+}
+
+char const *Alsc::name() const
+{
+ return NAME;
+}
+
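+/*
+ * Generate a radial fall-off luminance LUT from the "corner_strength" and
+ * "asymmetry" parameters, approximating the cos^4 law towards the corners.
+ */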
+static int generateLut(Array2D<double> &lut, const libcamera::YamlObject &params)
+{
+ /* These must be signed ints for the co-ordinate calculations below. */
+ int X = lut.dimensions().width, Y = lut.dimensions().height;
+ double cstrength = params["corner_strength"].get<double>(2.0);
+ if (cstrength <= 1.0) {
+ LOG(RPiAlsc, Error) << "corner_strength must be > 1.0";
+ return -EINVAL;
+ }
+
+ double asymmetry = params["asymmetry"].get<double>(1.0);
+ if (asymmetry < 0) {
+ LOG(RPiAlsc, Error) << "asymmetry must be >= 0";
+ return -EINVAL;
+ }
+
+ double f1 = cstrength - 1, f2 = 1 + sqrt(cstrength);
+ double R2 = X * Y / 4 * (1 + asymmetry * asymmetry);
+ int num = 0;
+ for (int y = 0; y < Y; y++) {
+ for (int x = 0; x < X; x++) {
+ double dy = y - Y / 2 + 0.5,
+ dx = (x - X / 2 + 0.5) * asymmetry;
+ double r2 = (dx * dx + dy * dy) / R2;
+ lut[num++] =
+ (f1 * r2 + f2) * (f1 * r2 + f2) /
+ (f2 * f2); /* this reproduces the cos^4 rule */
+ }
+ }
+ return 0;
+}
+
+static int readLut(Array2D<double> &lut, const libcamera::YamlObject &params)
+{
+ if (params.size() != lut.size()) {
+ LOG(RPiAlsc, Error) << "Invalid number of entries in LSC table";
+ return -EINVAL;
+ }
+
+ int num = 0;
+ for (const auto &p : params.asList()) {
+ auto value = p.get<double>();
+ if (!value)
+ return -EINVAL;
+ lut[num++] = *value;
+ }
+
+ return 0;
+}
+
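+/*
+ * Read a named list of calibration tables, each tagged with a colour
+ * temperature; entries must appear in increasing colour temperature order.
+ */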
+static int readCalibrations(std::vector<AlscCalibration> &calibrations,
+ const libcamera::YamlObject &params,
+ std::string const &name, const Size &size)
+{
+ if (params.contains(name)) {
+ double lastCt = 0;
+ for (const auto &p : params[name].asList()) {
+ auto value = p["ct"].get<double>();
+ if (!value)
+ return -EINVAL;
+ double ct = *value;
+ if (ct <= lastCt) {
+ LOG(RPiAlsc, Error)
+ << "Entries in " << name << " must be in increasing ct order";
+ return -EINVAL;
+ }
+ AlscCalibration calibration;
+ calibration.ct = lastCt = ct;
+
+ const libcamera::YamlObject &table = p["table"];
+ if (table.size() != size.width * size.height) {
+ LOG(RPiAlsc, Error)
+ << "Incorrect number of values for ct "
+ << ct << " in " << name;
+ return -EINVAL;
+ }
+
+ int num = 0;
+ calibration.table.resize(size);
+ for (const auto &elem : table.asList()) {
+ value = elem.get<double>();
+ if (!value)
+ return -EINVAL;
+ calibration.table[num++] = *value;
+ }
+
+ calibrations.push_back(std::move(calibration));
+ LOG(RPiAlsc, Debug)
+ << "Read " << name << " calibration for ct " << ct;
+ }
+ }
+ return 0;
+}
+
+int Alsc::read(const libcamera::YamlObject &params)
+{
+ config_.tableSize = getHardwareConfig().awbRegions;
+ config_.framePeriod = params["frame_period"].get<uint16_t>(12);
+ config_.startupFrames = params["startup_frames"].get<uint16_t>(10);
+ config_.speed = params["speed"].get<double>(0.05);
+ double sigma = params["sigma"].get<double>(0.01);
+ config_.sigmaCr = params["sigma_Cr"].get<double>(sigma);
+ config_.sigmaCb = params["sigma_Cb"].get<double>(sigma);
+ config_.minCount = params["min_count"].get<double>(10.0);
+ config_.minG = params["min_G"].get<uint16_t>(50);
+ config_.omega = params["omega"].get<double>(1.3);
+ config_.nIter = params["n_iter"].get<uint32_t>(config_.tableSize.width + config_.tableSize.height);
+ config_.luminanceStrength =
+ params["luminance_strength"].get<double>(1.0);
+
+ config_.luminanceLut.resize(config_.tableSize, 1.0);
+ int ret = 0;
+
+ if (params.contains("corner_strength"))
+ ret = generateLut(config_.luminanceLut, params);
+ else if (params.contains("luminance_lut"))
+ ret = readLut(config_.luminanceLut, params["luminance_lut"]);
+ else
+ LOG(RPiAlsc, Warning)
+ << "no luminance table - assume unity everywhere";
+ if (ret)
+ return ret;
+
+ ret = readCalibrations(config_.calibrationsCr, params, "calibrations_Cr",
+ config_.tableSize);
+ if (ret)
+ return ret;
+ ret = readCalibrations(config_.calibrationsCb, params, "calibrations_Cb",
+ config_.tableSize);
+ if (ret)
+ return ret;
+
+ config_.defaultCt = params["default_ct"].get<double>(4500.0);
+ config_.threshold = params["threshold"].get<double>(1e-3);
+ config_.lambdaBound = params["lambda_bound"].get<double>(0.05);
+
+ return 0;
+}
+
+static double getCt(Metadata *metadata, double defaultCt);
+static void getCalTable(double ct, std::vector<AlscCalibration> const &calibrations,
+ Array2D<double> &calTable);
+static void resampleCalTable(const Array2D<double> &calTableIn, CameraMode const &cameraMode,
+ Array2D<double> &calTableOut);
+static void compensateLambdasForCal(const Array2D<double> &calTable,
+ const Array2D<double> &oldLambdas,
+ Array2D<double> &newLambdas);
+static void addLuminanceToTables(std::array<Array2D<double>, 3> &results,
+ const Array2D<double> &lambdaR, double lambdaG,
+ const Array2D<double> &lambdaB,
+ const Array2D<double> &luminanceLut,
+ double luminanceStrength);
+
+void Alsc::initialise()
+{
+ frameCount2_ = frameCount_ = framePhase_ = 0;
+ firstTime_ = true;
+ ct_ = config_.defaultCt;
+
+ const size_t XY = config_.tableSize.width * config_.tableSize.height;
+
+ for (auto &r : syncResults_)
+ r.resize(config_.tableSize);
+ for (auto &r : prevSyncResults_)
+ r.resize(config_.tableSize);
+ for (auto &r : asyncResults_)
+ r.resize(config_.tableSize);
+
+ luminanceTable_.resize(config_.tableSize);
+ asyncLambdaR_.resize(config_.tableSize);
+ asyncLambdaB_.resize(config_.tableSize);
+ /* The lambdas are initialised in the SwitchMode. */
+ lambdaR_.resize(config_.tableSize);
+ lambdaB_.resize(config_.tableSize);
+
+ /* Temporaries for the computations, but sensible to allocate this up-front! */
+ for (auto &c : tmpC_)
+ c.resize(config_.tableSize);
+ for (auto &m : tmpM_)
+ m.resize(XY);
+}
+
+void Alsc::waitForAysncThread()
+{
+ if (asyncStarted_) {
+ asyncStarted_ = false;
+ std::unique_lock<std::mutex> lock(mutex_);
+ syncSignal_.wait(lock, [&] {
+ return asyncFinished_;
+ });
+ asyncFinished_ = false;
+ }
+}
+
+static bool compareModes(CameraMode const &cm0, CameraMode const &cm1)
+{
+ /*
+ * Return true if the modes crop from the sensor significantly differently,
+ * or if the user transform has changed.
+ */
+ if (cm0.transform != cm1.transform)
+ return true;
+ int leftDiff = abs(cm0.cropX - cm1.cropX);
+ int topDiff = abs(cm0.cropY - cm1.cropY);
+ int rightDiff = fabs(cm0.cropX + cm0.scaleX * cm0.width -
+ cm1.cropX - cm1.scaleX * cm1.width);
+ int bottomDiff = fabs(cm0.cropY + cm0.scaleY * cm0.height -
+ cm1.cropY - cm1.scaleY * cm1.height);
+ /*
+ * These thresholds are a rather arbitrary amount chosen to trigger
+ * when carrying on with the previously calculated tables might be
+ * worse than regenerating them (but without the adaptive algorithm).
+ */
+ int thresholdX = cm0.sensorWidth >> 4;
+ int thresholdY = cm0.sensorHeight >> 4;
+ return leftDiff > thresholdX || rightDiff > thresholdX ||
+ topDiff > thresholdY || bottomDiff > thresholdY;
+}
+
+void Alsc::switchMode(CameraMode const &cameraMode,
+		      Metadata *metadata)
+{
+ /*
+ * We're going to start over with the tables if there's any "significant"
+ * change.
+ */
+ bool resetTables = firstTime_ || compareModes(cameraMode_, cameraMode);
+
+ /* Believe the colour temperature from the AWB, if there is one. */
+ ct_ = getCt(metadata, ct_);
+
+ /* Ensure the other thread isn't running while we do this. */
+ waitForAysncThread();
+
+ cameraMode_ = cameraMode;
+
+ /*
+ * We must resample the luminance table like we do the others, but it's
+ * fixed so we can simply do it up front here.
+ */
+ resampleCalTable(config_.luminanceLut, cameraMode_, luminanceTable_);
+
+ if (resetTables) {
+ /*
+ * Upon every "table reset", arrange for something sensible to be
+ * generated. Construct the tables for the previous recorded colour
+ * temperature. In order to start over from scratch we initialise
+ * the lambdas, but the rest of this code then echoes the code in
+ * doAlsc, without the adaptive algorithm.
+ */
+ std::fill(lambdaR_.begin(), lambdaR_.end(), 1.0);
+ std::fill(lambdaB_.begin(), lambdaB_.end(), 1.0);
+ Array2D<double> &calTableR = tmpC_[0], &calTableB = tmpC_[1], &calTableTmp = tmpC_[2];
+ getCalTable(ct_, config_.calibrationsCr, calTableTmp);
+ resampleCalTable(calTableTmp, cameraMode_, calTableR);
+ getCalTable(ct_, config_.calibrationsCb, calTableTmp);
+ resampleCalTable(calTableTmp, cameraMode_, calTableB);
+ compensateLambdasForCal(calTableR, lambdaR_, asyncLambdaR_);
+ compensateLambdasForCal(calTableB, lambdaB_, asyncLambdaB_);
+ addLuminanceToTables(syncResults_, asyncLambdaR_, 1.0, asyncLambdaB_,
+ luminanceTable_, config_.luminanceStrength);
+ prevSyncResults_ = syncResults_;
+ framePhase_ = config_.framePeriod; /* run the algo again asap */
+ firstTime_ = false;
+ }
+}
+
+void Alsc::fetchAsyncResults()
+{
+ LOG(RPiAlsc, Debug) << "Fetch ALSC results";
+ asyncFinished_ = false;
+ asyncStarted_ = false;
+ syncResults_ = asyncResults_;
+}
+
+double getCt(Metadata *metadata, double defaultCt)
+{
+ AwbStatus awbStatus;
+ awbStatus.temperatureK = defaultCt; /* in case nothing found */
+ if (metadata->get("awb.status", awbStatus) != 0)
+ LOG(RPiAlsc, Debug) << "no AWB results found, using "
+ << awbStatus.temperatureK;
+ else
+ LOG(RPiAlsc, Debug) << "AWB results found, using "
+ << awbStatus.temperatureK;
+ return awbStatus.temperatureK;
+}
+
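+/*
+ * Copy the AWB region statistics for the asynchronous thread. If the statistics
+ * were gathered after lens shading correction, divide out the tables we believe
+ * the pipeline applied so that the algorithm works on roughly uncorrected data.
+ */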
+static void copyStats(RgbyRegions &regions, StatisticsPtr &stats,
+ std::array<Array2D<double>, 3> &prevSyncResults)
+{
+ if (!regions.numRegions())
+ regions.init(stats->awbRegions.size());
+
+ const std::vector<double> &rTable = prevSyncResults[0].data(); //status.r;
+ const std::vector<double> &gTable = prevSyncResults[1].data(); //status.g;
+ const std::vector<double> &bTable = prevSyncResults[2].data(); //status.b;
+ for (unsigned int i = 0; i < stats->awbRegions.numRegions(); i++) {
+ auto r = stats->awbRegions.get(i);
+ if (stats->colourStatsPos == Statistics::ColourStatsPos::PostLsc) {
+ r.val.rSum = static_cast<uint64_t>(r.val.rSum / rTable[i]);
+ r.val.gSum = static_cast<uint64_t>(r.val.gSum / gTable[i]);
+ r.val.bSum = static_cast<uint64_t>(r.val.bSum / bTable[i]);
+ }
+ regions.set(i, r);
+ }
+}
+
+void Alsc::restartAsync(StatisticsPtr &stats, Metadata *imageMetadata)
+{
+ LOG(RPiAlsc, Debug) << "Starting ALSC calculation";
+ /*
+ * Get the current colour temperature. It's all we need from the
+ * metadata. Default to the last CT value (which could be the default).
+ */
+ ct_ = getCt(imageMetadata, ct_);
+ /*
+ * We have to copy the statistics here, dividing out our best guess of
+ * the LSC table that the pipeline applied to them which we get from
+ * prevSyncResults_.
+ */
+ copyStats(statistics_, stats, prevSyncResults_);
+ framePhase_ = 0;
+ asyncStarted_ = true;
+ {
+ std::lock_guard<std::mutex> lock(mutex_);
+ asyncStart_ = true;
+ }
+ asyncSignal_.notify_one();
+}
+
+void Alsc::prepare(Metadata *imageMetadata)
+{
+ /*
+ * Count frames since we started, and since we last poked the async
+ * thread.
+ */
+ if (frameCount_ < (int)config_.startupFrames)
+ frameCount_++;
+ double speed = frameCount_ < (int)config_.startupFrames
+ ? 1.0
+ : config_.speed;
+ LOG(RPiAlsc, Debug)
+ << "frame count " << frameCount_ << " speed " << speed;
+ {
+ std::unique_lock<std::mutex> lock(mutex_);
+ if (asyncStarted_ && asyncFinished_)
+ fetchAsyncResults();
+ }
+ /* Apply IIR filter to results and program into the pipeline. */
+ for (unsigned int j = 0; j < syncResults_.size(); j++) {
+ for (unsigned int i = 0; i < syncResults_[j].size(); i++)
+ prevSyncResults_[j][i] = speed * syncResults_[j][i] + (1.0 - speed) * prevSyncResults_[j][i];
+ }
+ /* Put output values into status metadata. */
+ AlscStatus status;
+ status.r = prevSyncResults_[0].data();
+ status.g = prevSyncResults_[1].data();
+ status.b = prevSyncResults_[2].data();
+ imageMetadata->set("alsc.status", status);
+ /*
+ * Put the results in the global metadata as well. This will be used by
+ * AWB to factor in the colour shading correction.
+ */
+ getGlobalMetadata().set("alsc.status", status);
+}
+
+void Alsc::process(StatisticsPtr &stats, Metadata *imageMetadata)
+{
+ /*
+ * Count frames since we started, and since we last poked the async
+ * thread.
+ */
+ if (framePhase_ < (int)config_.framePeriod)
+ framePhase_++;
+ if (frameCount2_ < (int)config_.startupFrames)
+ frameCount2_++;
+ LOG(RPiAlsc, Debug) << "frame_phase " << framePhase_;
+ if (framePhase_ >= (int)config_.framePeriod ||
+ frameCount2_ < (int)config_.startupFrames) {
+ if (asyncStarted_ == false)
+ restartAsync(stats, imageMetadata);
+ }
+}
+
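+/*
+ * Worker loop for the asynchronous thread: wait to be started (or aborted),
+ * then run doAlsc() and signal completion.
+ */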
+void Alsc::asyncFunc()
+{
+ while (true) {
+ {
+ std::unique_lock<std::mutex> lock(mutex_);
+ asyncSignal_.wait(lock, [&] {
+ return asyncStart_ || asyncAbort_;
+ });
+ asyncStart_ = false;
+ if (asyncAbort_)
+ break;
+ }
+ doAlsc();
+ {
+ std::lock_guard<std::mutex> lock(mutex_);
+ asyncFinished_ = true;
+ }
+ syncSignal_.notify_one();
+ }
+}
+
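+/*
+ * Select the calibration table for the given colour temperature, interpolating
+ * linearly between the two nearest entries when the temperature falls between
+ * calibrations. With no calibrations, fill the table with 1s.
+ */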
+void getCalTable(double ct, std::vector<AlscCalibration> const &calibrations,
+ Array2D<double> &calTable)
+{
+ if (calibrations.empty()) {
+ std::fill(calTable.begin(), calTable.end(), 1.0);
+ LOG(RPiAlsc, Debug) << "no calibrations found";
+ } else if (ct <= calibrations.front().ct) {
+ calTable = calibrations.front().table;
+ LOG(RPiAlsc, Debug) << "using calibration for "
+ << calibrations.front().ct;
+ } else if (ct >= calibrations.back().ct) {
+ calTable = calibrations.back().table;
+ LOG(RPiAlsc, Debug) << "using calibration for "
+ << calibrations.back().ct;
+ } else {
+ int idx = 0;
+ while (ct > calibrations[idx + 1].ct)
+ idx++;
+ double ct0 = calibrations[idx].ct, ct1 = calibrations[idx + 1].ct;
+ LOG(RPiAlsc, Debug)
+ << "ct is " << ct << ", interpolating between "
+ << ct0 << " and " << ct1;
+ for (unsigned int i = 0; i < calTable.size(); i++)
+ calTable[i] =
+ (calibrations[idx].table[i] * (ct1 - ct) +
+ calibrations[idx + 1].table[i] * (ct - ct0)) /
+ (ct1 - ct0);
+ }
+}
+
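+/*
+ * Resample a calibration table from full sensor coordinates to the crop and
+ * scale of the current camera mode, using bilinear interpolation and honouring
+ * any horizontal/vertical flip in the mode's transform.
+ */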
+void resampleCalTable(const Array2D<double> &calTableIn,
+ CameraMode const &cameraMode,
+ Array2D<double> &calTableOut)
+{
+ int X = calTableIn.dimensions().width;
+ int Y = calTableIn.dimensions().height;
+
+ /*
+ * Precalculate and cache the x sampling locations and phases to save
+ * recomputing them on every row.
+ */
+ int xLo[X], xHi[X];
+ double xf[X];
+ double scaleX = cameraMode.sensorWidth /
+ (cameraMode.width * cameraMode.scaleX);
+ double xOff = cameraMode.cropX / (double)cameraMode.sensorWidth;
+ double x = .5 / scaleX + xOff * X - .5;
+ double xInc = 1 / scaleX;
+ for (int i = 0; i < X; i++, x += xInc) {
+ xLo[i] = floor(x);
+ xf[i] = x - xLo[i];
+ xHi[i] = std::min(xLo[i] + 1, X - 1);
+ xLo[i] = std::max(xLo[i], 0);
+ if (!!(cameraMode.transform & libcamera::Transform::HFlip)) {
+ xLo[i] = X - 1 - xLo[i];
+ xHi[i] = X - 1 - xHi[i];
+ }
+ }
+ /* Now march over the output table generating the new values. */
+ double scaleY = cameraMode.sensorHeight /
+ (cameraMode.height * cameraMode.scaleY);
+ double yOff = cameraMode.cropY / (double)cameraMode.sensorHeight;
+ double y = .5 / scaleY + yOff * Y - .5;
+ double yInc = 1 / scaleY;
+ for (int j = 0; j < Y; j++, y += yInc) {
+ int yLo = floor(y);
+ double yf = y - yLo;
+ int yHi = std::min(yLo + 1, Y - 1);
+ yLo = std::max(yLo, 0);
+ if (!!(cameraMode.transform & libcamera::Transform::VFlip)) {
+ yLo = Y - 1 - yLo;
+ yHi = Y - 1 - yHi;
+ }
+ double const *rowAbove = calTableIn.ptr() + X * yLo;
+ double const *rowBelow = calTableIn.ptr() + X * yHi;
+ double *out = calTableOut.ptr() + X * j;
+ for (int i = 0; i < X; i++) {
+ double above = rowAbove[xLo[i]] * (1 - xf[i]) +
+ rowAbove[xHi[i]] * xf[i];
+ double below = rowBelow[xLo[i]] * (1 - xf[i]) +
+ rowBelow[xHi[i]] * xf[i];
+ *(out++) = above * (1 - yf) + below * yf;
+ }
+ }
+}
+
+/* Calculate chrominance statistics (R/G and B/G) for each region. */
+static void calculateCrCb(const RgbyRegions &awbRegion, Array2D<double> &cr,
+ Array2D<double> &cb, uint32_t minCount, uint16_t minG)
+{
+ for (unsigned int i = 0; i < cr.size(); i++) {
+ auto s = awbRegion.get(i);
+
+ /* Do not return unreliable, or zero, colour ratio statistics. */
+ if (s.counted <= minCount || s.val.gSum / s.counted <= minG ||
+ s.val.rSum / s.counted <= minG || s.val.bSum / s.counted <= minG) {
+ cr[i] = cb[i] = InsufficientData;
+ continue;
+ }
+
+ cr[i] = s.val.rSum / (double)s.val.gSum;
+ cb[i] = s.val.bSum / (double)s.val.gSum;
+ }
+}
+
+static void applyCalTable(const Array2D<double> &calTable, Array2D<double> &C)
+{
+ for (unsigned int i = 0; i < C.size(); i++)
+ if (C[i] != InsufficientData)
+ C[i] *= calTable[i];
+}
+
+void compensateLambdasForCal(const Array2D<double> &calTable,
+ const Array2D<double> &oldLambdas,
+ Array2D<double> &newLambdas)
+{
+ double minNewLambda = std::numeric_limits<double>::max();
+ for (unsigned int i = 0; i < newLambdas.size(); i++) {
+ newLambdas[i] = oldLambdas[i] * calTable[i];
+ minNewLambda = std::min(minNewLambda, newLambdas[i]);
+ }
+ for (unsigned int i = 0; i < newLambdas.size(); i++)
+ newLambdas[i] /= minNewLambda;
+}
+
+[[maybe_unused]] static void printCalTable(const Array2D<double> &C)
+{
+ const Size &size = C.dimensions();
+ printf("table: [\n");
+ for (unsigned int j = 0; j < size.height; j++) {
+ for (unsigned int i = 0; i < size.width; i++) {
+ printf("%5.3f", 1.0 / C[j * size.width + i]);
+ if (i != size.width - 1 || j != size.height - 1)
+ printf(",");
+ }
+ printf("\n");
+ }
+ printf("]\n");
+}
+
+/*
+ * Compute weight out of 1.0 which reflects how similar we wish to make the
+ * colours of these two regions.
+ */
+static double computeWeight(double Ci, double Cj, double sigma)
+{
+ if (Ci == InsufficientData || Cj == InsufficientData)
+ return 0;
+ double diff = (Ci - Cj) / sigma;
+ return exp(-diff * diff / 2);
+}
+
+/* Compute all weights. */
+static void computeW(const Array2D<double> &C, double sigma,
+ SparseArray<double> &W)
+{
+ size_t XY = C.size();
+ size_t X = C.dimensions().width;
+
+ for (unsigned int i = 0; i < XY; i++) {
+ /* Start with neighbour above and go clockwise. */
+ W[i][0] = i >= X ? computeWeight(C[i], C[i - X], sigma) : 0;
+ W[i][1] = i % X < X - 1 ? computeWeight(C[i], C[i + 1], sigma) : 0;
+ W[i][2] = i < XY - X ? computeWeight(C[i], C[i + X], sigma) : 0;
+ W[i][3] = i % X ? computeWeight(C[i], C[i - 1], sigma) : 0;
+ }
+}
+
+/* Compute M, the large but sparse matrix such that M * lambdas = 0. */
+static void constructM(const Array2D<double> &C,
+ const SparseArray<double> &W,
+ SparseArray<double> &M)
+{
+ size_t XY = C.size();
+ size_t X = C.dimensions().width;
+
+ double epsilon = 0.001;
+ for (unsigned int i = 0; i < XY; i++) {
+ /*
+		 * Note how, if C[i] == InsufficientData, the weights will all
+ * be zero so the equation is still set up correctly.
+ */
+ int m = !!(i >= X) + !!(i % X < X - 1) + !!(i < XY - X) +
+ !!(i % X); /* total number of neighbours */
+ /* we'll divide the diagonal out straight away */
+ double diagonal = (epsilon + W[i][0] + W[i][1] + W[i][2] + W[i][3]) * C[i];
+ M[i][0] = i >= X ? (W[i][0] * C[i - X] + epsilon / m * C[i]) / diagonal : 0;
+ M[i][1] = i % X < X - 1 ? (W[i][1] * C[i + 1] + epsilon / m * C[i]) / diagonal : 0;
+ M[i][2] = i < XY - X ? (W[i][2] * C[i + X] + epsilon / m * C[i]) / diagonal : 0;
+ M[i][3] = i % X ? (W[i][3] * C[i - 1] + epsilon / m * C[i]) / diagonal : 0;
+ }
+}
+
+/*
+ * In the computeLambda functions, note that the matrix coefficients for the
+ * left/right neighbours are zero down the left/right edges, so we don't
+ * need to test the i value to exclude them.
+ */
+static double computeLambdaBottom(int i, const SparseArray<double> &M,
+ Array2D<double> &lambda)
+{
+ return M[i][1] * lambda[i + 1] + M[i][2] * lambda[i + lambda.dimensions().width] +
+ M[i][3] * lambda[i - 1];
+}
+static double computeLambdaBottomStart(int i, const SparseArray<double> &M,
+ Array2D<double> &lambda)
+{
+ return M[i][1] * lambda[i + 1] + M[i][2] * lambda[i + lambda.dimensions().width];
+}
+static double computeLambdaInterior(int i, const SparseArray<double> &M,
+ Array2D<double> &lambda)
+{
+ return M[i][0] * lambda[i - lambda.dimensions().width] + M[i][1] * lambda[i + 1] +
+ M[i][2] * lambda[i + lambda.dimensions().width] + M[i][3] * lambda[i - 1];
+}
+static double computeLambdaTop(int i, const SparseArray<double> &M,
+ Array2D<double> &lambda)
+{
+ return M[i][0] * lambda[i - lambda.dimensions().width] + M[i][1] * lambda[i + 1] +
+ M[i][3] * lambda[i - 1];
+}
+static double computeLambdaTopEnd(int i, const SparseArray<double> &M,
+ Array2D<double> &lambda)
+{
+ return M[i][0] * lambda[i - lambda.dimensions().width] + M[i][3] * lambda[i - 1];
+}
+
+/* Gauss-Seidel iteration with over-relaxation. */
+static double gaussSeidel2Sor(const SparseArray<double> &M, double omega,
+ Array2D<double> &lambda, double lambdaBound)
+{
+ int XY = lambda.size();
+ int X = lambda.dimensions().width;
+ const double min = 1 - lambdaBound, max = 1 + lambdaBound;
+ Array2D<double> oldLambda = lambda;
+ int i;
+ lambda[0] = computeLambdaBottomStart(0, M, lambda);
+ lambda[0] = std::clamp(lambda[0], min, max);
+ for (i = 1; i < X; i++) {
+ lambda[i] = computeLambdaBottom(i, M, lambda);
+ lambda[i] = std::clamp(lambda[i], min, max);
+ }
+ for (; i < XY - X; i++) {
+ lambda[i] = computeLambdaInterior(i, M, lambda);
+ lambda[i] = std::clamp(lambda[i], min, max);
+ }
+ for (; i < XY - 1; i++) {
+ lambda[i] = computeLambdaTop(i, M, lambda);
+ lambda[i] = std::clamp(lambda[i], min, max);
+ }
+ lambda[i] = computeLambdaTopEnd(i, M, lambda);
+ lambda[i] = std::clamp(lambda[i], min, max);
+ /*
+ * Also solve the system from bottom to top, to help spread the updates
+ * better.
+ */
+ lambda[i] = computeLambdaTopEnd(i, M, lambda);
+ lambda[i] = std::clamp(lambda[i], min, max);
+ for (i = XY - 2; i >= XY - X; i--) {
+ lambda[i] = computeLambdaTop(i, M, lambda);
+ lambda[i] = std::clamp(lambda[i], min, max);
+ }
+ for (; i >= X; i--) {
+ lambda[i] = computeLambdaInterior(i, M, lambda);
+ lambda[i] = std::clamp(lambda[i], min, max);
+ }
+ for (; i >= 1; i--) {
+ lambda[i] = computeLambdaBottom(i, M, lambda);
+ lambda[i] = std::clamp(lambda[i], min, max);
+ }
+ lambda[0] = computeLambdaBottomStart(0, M, lambda);
+ lambda[0] = std::clamp(lambda[0], min, max);
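+	/*
+	 * Apply the over-relaxation factor and track the largest change for
+	 * the caller's convergence test.
+	 */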
+ double maxDiff = 0;
+ for (i = 0; i < XY; i++) {
+ lambda[i] = oldLambda[i] + (lambda[i] - oldLambda[i]) * omega;
+ if (fabs(lambda[i] - oldLambda[i]) > fabs(maxDiff))
+ maxDiff = lambda[i] - oldLambda[i];
+ }
+ return maxDiff;
+}
+
+/* Normalise the values so that the smallest value is 1. */
+static void normalise(Array2D<double> &results)
+{
+ double minval = *std::min_element(results.begin(), results.end());
+	std::for_each(results.begin(), results.end(),
+		      [minval](double &val) { val /= minval; });
+}
+
+/* Rescale the values so that the average value is 1. */
+static void reaverage(Array2D<double> &data)
+{
+ double sum = std::accumulate(data.begin(), data.end(), 0.0);
+ double ratio = 1 / (sum / data.size());
+	std::for_each(data.begin(), data.end(),
+		      [ratio](double &val) { val *= ratio; });
+}
+
+static void runMatrixIterations(const Array2D<double> &C,
+ Array2D<double> &lambda,
+ const SparseArray<double> &W,
+ SparseArray<double> &M, double omega,
+ unsigned int nIter, double threshold, double lambdaBound)
+{
+ constructM(C, W, M);
+ double lastMaxDiff = std::numeric_limits<double>::max();
+ for (unsigned int i = 0; i < nIter; i++) {
+ double maxDiff = fabs(gaussSeidel2Sor(M, omega, lambda, lambdaBound));
+ if (maxDiff < threshold) {
+ LOG(RPiAlsc, Debug)
+ << "Stop after " << i + 1 << " iterations";
+ break;
+ }
+ /*
+		 * This happens very occasionally (so make a note of it), though
+		 * it doesn't seem to matter.
+ */
+ if (maxDiff > lastMaxDiff)
+ LOG(RPiAlsc, Debug)
+ << "Iteration " << i << ": maxDiff gone up "
+ << lastMaxDiff << " to " << maxDiff;
+ lastMaxDiff = maxDiff;
+ }
+ /* We're going to normalise the lambdas so the total average is 1. */
+ reaverage(lambda);
+}
+
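+/*
+ * Fold the luminance LUT into the colour tables at the given strength: a
+ * strength of 1.0 applies the full luminance correction, 0.0 none of it.
+ */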
+static void addLuminanceRb(Array2D<double> &result, const Array2D<double> &lambda,
+ const Array2D<double> &luminanceLut,
+ double luminanceStrength)
+{
+ for (unsigned int i = 0; i < result.size(); i++)
+ result[i] = lambda[i] * ((luminanceLut[i] - 1) * luminanceStrength + 1);
+}
+
+static void addLuminanceG(Array2D<double> &result, double lambda,
+ const Array2D<double> &luminanceLut,
+ double luminanceStrength)
+{
+ for (unsigned int i = 0; i < result.size(); i++)
+ result[i] = lambda * ((luminanceLut[i] - 1) * luminanceStrength + 1);
+}
+
+void addLuminanceToTables(std::array<Array2D<double>, 3> &results,
+ const Array2D<double> &lambdaR,
+ double lambdaG, const Array2D<double> &lambdaB,
+ const Array2D<double> &luminanceLut,
+ double luminanceStrength)
+{
+ addLuminanceRb(results[0], lambdaR, luminanceLut, luminanceStrength);
+ addLuminanceG(results[1], lambdaG, luminanceLut, luminanceStrength);
+ addLuminanceRb(results[2], lambdaB, luminanceLut, luminanceStrength);
+ for (auto &r : results)
+ normalise(r);
+}
+
+void Alsc::doAlsc()
+{
+ Array2D<double> &cr = tmpC_[0], &cb = tmpC_[1], &calTableR = tmpC_[2],
+ &calTableB = tmpC_[3], &calTableTmp = tmpC_[4];
+ SparseArray<double> &wr = tmpM_[0], &wb = tmpM_[1], &M = tmpM_[2];
+
+ /*
+ * Calculate our R/B ("Cr"/"Cb") colour statistics, and assess which are
+ * usable.
+ */
+ calculateCrCb(statistics_, cr, cb, config_.minCount, config_.minG);
+ /*
+ * Fetch the new calibrations (if any) for this CT. Resample them in
+ * case the camera mode is not full-frame.
+ */
+ getCalTable(ct_, config_.calibrationsCr, calTableTmp);
+ resampleCalTable(calTableTmp, cameraMode_, calTableR);
+ getCalTable(ct_, config_.calibrationsCb, calTableTmp);
+ resampleCalTable(calTableTmp, cameraMode_, calTableB);
+ /*
+ * You could print out the cal tables for this image here, if you're
+ * tuning the algorithm...
+ * Apply any calibration to the statistics, so the adaptive algorithm
+ * makes only the extra adjustments.
+ */
+ applyCalTable(calTableR, cr);
+ applyCalTable(calTableB, cb);
+ /* Compute weights between zones. */
+ computeW(cr, config_.sigmaCr, wr);
+ computeW(cb, config_.sigmaCb, wb);
+ /* Run Gauss-Seidel iterations over the resulting matrix, for R and B. */
+ runMatrixIterations(cr, lambdaR_, wr, M, config_.omega, config_.nIter,
+ config_.threshold, config_.lambdaBound);
+ runMatrixIterations(cb, lambdaB_, wb, M, config_.omega, config_.nIter,
+ config_.threshold, config_.lambdaBound);
+ /*
+ * Fold the calibrated gains into our final lambda values. (Note that on
+ * the next run, we re-start with the lambda values that don't have the
+ * calibration gains included.)
+ */
+ compensateLambdasForCal(calTableR, lambdaR_, asyncLambdaR_);
+ compensateLambdasForCal(calTableB, lambdaB_, asyncLambdaB_);
+ /* Fold in the luminance table at the appropriate strength. */
+ addLuminanceToTables(asyncResults_, asyncLambdaR_, 1.0,
+ asyncLambdaB_, luminanceTable_,
+ config_.luminanceStrength);
+}
+
+/* Register algorithm with the system. */
+static Algorithm *create(Controller *controller)
+{
+ return (Algorithm *)new Alsc(controller);
+}
+static RegisterAlgorithm reg(NAME, &create);
diff --git a/src/ipa/rpi/controller/rpi/alsc.h b/src/ipa/rpi/controller/rpi/alsc.h
new file mode 100644
index 00000000..31087982
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/alsc.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * ALSC (auto lens shading correction) control algorithm
+ */
+#pragma once
+
+#include <array>
+#include <mutex>
+#include <condition_variable>
+#include <thread>
+#include <vector>
+
+#include <libcamera/geometry.h>
+
+#include "../algorithm.h"
+#include "../alsc_status.h"
+#include "../statistics.h"
+
+namespace RPiController {
+
+/* Algorithm to generate automagic LSC (Lens Shading Correction) tables. */
+
+/*
+ * The Array2D class is a very thin wrapper round std::vector so that it can
+ * be used in exactly the same way in the code but carries its correct width
+ * and height ("dimensions") with it.
+ */
+
+template<typename T>
+class Array2D
+{
+public:
+ using Size = libcamera::Size;
+
+ const Size &dimensions() const { return dimensions_; }
+
+ size_t size() const { return data_.size(); }
+
+ const std::vector<T> &data() const { return data_; }
+
+ void resize(const Size &dims)
+ {
+ dimensions_ = dims;
+ data_.resize(dims.width * dims.height);
+ }
+
+ void resize(const Size &dims, const T &value)
+ {
+ resize(dims);
+ std::fill(data_.begin(), data_.end(), value);
+ }
+
+ T &operator[](int index) { return data_[index]; }
+
+ const T &operator[](int index) const { return data_[index]; }
+
+ T *ptr() { return data_.data(); }
+
+ const T *ptr() const { return data_.data(); }
+
+ auto begin() { return data_.begin(); }
+ auto end() { return data_.end(); }
+
+private:
+ Size dimensions_;
+ std::vector<T> data_;
+};
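A short usage sketch of the wrapper above (not part of the patch), assuming the alsc.h added here is on the include path; the flat row-major indexing mirrors how the ALSC code addresses its zones:

#include <cassert>

#include <libcamera/geometry.h>

#include "alsc.h"

void array2dExample()
{
	RPiController::Array2D<double> table;

	/* A 16x12 grid of gains, all initialised to unity. */
	table.resize(libcamera::Size(16, 12), 1.0);
	assert(table.size() == 16 * 12);
	assert(table.dimensions() == libcamera::Size(16, 12));

	/* Flat row-major indexing: zone (x, y) lives at y * width + x. */
	unsigned int x = 3, y = 2;
	table[y * table.dimensions().width + x] = 1.25;
}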
+
+/*
+ * We'll use the term SparseArray for the large sparse matrices that are
+ * XY tall but have only 4 non-zero elements on each row.
+ */
+
+template<typename T>
+using SparseArray = std::vector<std::array<T, 4>>;
+
+struct AlscCalibration {
+ double ct;
+ Array2D<double> table;
+};
+
+struct AlscConfig {
+ /* Only repeat the ALSC calculation every "this many" frames */
+ uint16_t framePeriod;
+ /* number of initial frames for which speed taken as 1.0 (maximum) */
+ uint16_t startupFrames;
+ /* IIR filter speed applied to algorithm results */
+ double speed;
+ double sigmaCr;
+ double sigmaCb;
+ double minCount;
+ uint16_t minG;
+ double omega;
+ uint32_t nIter;
+ Array2D<double> luminanceLut;
+ double luminanceStrength;
+ std::vector<AlscCalibration> calibrationsCr;
+ std::vector<AlscCalibration> calibrationsCb;
+ double defaultCt; /* colour temperature if no metadata found */
+ double threshold; /* iteration termination threshold */
+ double lambdaBound; /* upper/lower bound for lambda from a value of 1 */
+ libcamera::Size tableSize;
+};
+
+class Alsc : public Algorithm
+{
+public:
+ Alsc(Controller *controller = NULL);
+ ~Alsc();
+ char const *name() const override;
+ void initialise() override;
+ void switchMode(CameraMode const &cameraMode, Metadata *metadata) override;
+ int read(const libcamera::YamlObject &params) override;
+ void prepare(Metadata *imageMetadata) override;
+ void process(StatisticsPtr &stats, Metadata *imageMetadata) override;
+
+private:
+ /* configuration is read-only, and available to both threads */
+ AlscConfig config_;
+ bool firstTime_;
+ CameraMode cameraMode_;
+ Array2D<double> luminanceTable_;
+ std::thread asyncThread_;
+ void asyncFunc(); /* asynchronous thread function */
+ std::mutex mutex_;
+ /* condvar for async thread to wait on */
+ std::condition_variable asyncSignal_;
+ /* condvar for synchronous thread to wait on */
+ std::condition_variable syncSignal_;
+ /* for sync thread to check if async thread finished (requires mutex) */
+ bool asyncFinished_;
+ /* for async thread to check if it's been told to run (requires mutex) */
+ bool asyncStart_;
+ /* for async thread to check if it's been told to quit (requires mutex) */
+ bool asyncAbort_;
+
+ /*
+ * The following are only for the synchronous thread to use:
+ * for sync thread to note it has asked async thread to run
+ */
+ bool asyncStarted_;
+ /* counts up to framePeriod before restarting the async thread */
+ int framePhase_;
+ /* counts up to startupFrames */
+ int frameCount_;
+ /* counts up to startupFrames for Process function */
+ int frameCount2_;
+ std::array<Array2D<double>, 3> syncResults_;
+ std::array<Array2D<double>, 3> prevSyncResults_;
+ void waitForAysncThread();
+ /*
+ * The following are for the asynchronous thread to use, though the main
+ * thread can set/reset them if the async thread is known to be idle:
+ */
+ void restartAsync(StatisticsPtr &stats, Metadata *imageMetadata);
+ /* copy out the results from the async thread so that it can be restarted */
+ void fetchAsyncResults();
+ double ct_;
+ RgbyRegions statistics_;
+ std::array<Array2D<double>, 3> asyncResults_;
+ Array2D<double> asyncLambdaR_;
+ Array2D<double> asyncLambdaB_;
+ void doAlsc();
+ Array2D<double> lambdaR_;
+ Array2D<double> lambdaB_;
+
+ /* Temporaries for the computations */
+ std::array<Array2D<double>, 5> tmpC_;
+ std::array<SparseArray<double>, 3> tmpM_;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/rpi/awb.cpp b/src/ipa/rpi/controller/rpi/awb.cpp
new file mode 100644
index 00000000..abe5906e
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/awb.cpp
@@ -0,0 +1,751 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * AWB control algorithm
+ */
+
+#include <assert.h>
+#include <functional>
+
+#include <libcamera/base/log.h>
+
+#include "../lux_status.h"
+
+#include "alsc_status.h"
+#include "awb.h"
+
+using namespace RPiController;
+using namespace libcamera;
+
+LOG_DEFINE_CATEGORY(RPiAwb)
+
+#define NAME "rpi.awb"
+
+/*
+ * todo - the locking in this algorithm needs some tidying up as has been done
+ * elsewhere (ALSC and AGC).
+ */
+
+int AwbMode::read(const libcamera::YamlObject &params)
+{
+ auto value = params["lo"].get<double>();
+ if (!value)
+ return -EINVAL;
+ ctLo = *value;
+
+ value = params["hi"].get<double>();
+ if (!value)
+ return -EINVAL;
+ ctHi = *value;
+
+ return 0;
+}
+
+int AwbPrior::read(const libcamera::YamlObject &params)
+{
+ auto value = params["lux"].get<double>();
+ if (!value)
+ return -EINVAL;
+ lux = *value;
+
+ return prior.read(params["prior"]);
+}
+
+static int readCtCurve(Pwl &ctR, Pwl &ctB, const libcamera::YamlObject &params)
+{
+ if (params.size() % 3) {
+ LOG(RPiAwb, Error) << "AwbConfig: incomplete CT curve entry";
+ return -EINVAL;
+ }
+
+ if (params.size() < 6) {
+ LOG(RPiAwb, Error) << "AwbConfig: insufficient points in CT curve";
+ return -EINVAL;
+ }
+
+ const auto &list = params.asList();
+
+ for (auto it = list.begin(); it != list.end(); it++) {
+ auto value = it->get<double>();
+ if (!value)
+ return -EINVAL;
+ double ct = *value;
+
+ assert(it == list.begin() || ct != ctR.domain().end);
+
+ value = (++it)->get<double>();
+ if (!value)
+ return -EINVAL;
+ ctR.append(ct, *value);
+
+ value = (++it)->get<double>();
+ if (!value)
+ return -EINVAL;
+ ctB.append(ct, *value);
+ }
+
+ return 0;
+}
+
+int AwbConfig::read(const libcamera::YamlObject &params)
+{
+ int ret;
+
+ bayes = params["bayes"].get<int>(1);
+ framePeriod = params["frame_period"].get<uint16_t>(10);
+ startupFrames = params["startup_frames"].get<uint16_t>(10);
+ convergenceFrames = params["convergence_frames"].get<unsigned int>(3);
+ speed = params["speed"].get<double>(0.05);
+
+ if (params.contains("ct_curve")) {
+ ret = readCtCurve(ctR, ctB, params["ct_curve"]);
+ if (ret)
+ return ret;
+ /* We will want the inverse functions of these too. */
+ ctRInverse = ctR.inverse();
+ ctBInverse = ctB.inverse();
+ }
+
+ if (params.contains("priors")) {
+ for (const auto &p : params["priors"].asList()) {
+ AwbPrior prior;
+ ret = prior.read(p);
+ if (ret)
+ return ret;
+ if (!priors.empty() && prior.lux <= priors.back().lux) {
+ LOG(RPiAwb, Error) << "AwbConfig: Prior must be ordered in increasing lux value";
+ return -EINVAL;
+ }
+ priors.push_back(prior);
+ }
+ if (priors.empty()) {
+ LOG(RPiAwb, Error) << "AwbConfig: no AWB priors configured";
+ return -EINVAL;
+ }
+ }
+ if (params.contains("modes")) {
+ for (const auto &[key, value] : params["modes"].asDict()) {
+ ret = modes[key].read(value);
+ if (ret)
+ return ret;
+ if (defaultMode == nullptr)
+ defaultMode = &modes[key];
+ }
+ if (defaultMode == nullptr) {
+ LOG(RPiAwb, Error) << "AwbConfig: no AWB modes configured";
+ return -EINVAL;
+ }
+ }
+
+ minPixels = params["min_pixels"].get<double>(16.0);
+ minG = params["min_G"].get<uint16_t>(32);
+ minRegions = params["min_regions"].get<uint32_t>(10);
+ deltaLimit = params["delta_limit"].get<double>(0.2);
+ coarseStep = params["coarse_step"].get<double>(0.2);
+ transversePos = params["transverse_pos"].get<double>(0.01);
+ transverseNeg = params["transverse_neg"].get<double>(0.01);
+ if (transversePos <= 0 || transverseNeg <= 0) {
+ LOG(RPiAwb, Error) << "AwbConfig: transverse_pos/neg must be > 0";
+ return -EINVAL;
+ }
+
+ sensitivityR = params["sensitivity_r"].get<double>(1.0);
+ sensitivityB = params["sensitivity_b"].get<double>(1.0);
+
+ if (bayes) {
+ if (ctR.empty() || ctB.empty() || priors.empty() ||
+ defaultMode == nullptr) {
+ LOG(RPiAwb, Warning)
+ << "Bayesian AWB mis-configured - switch to Grey method";
+ bayes = false;
+ }
+ }
+ fast = params["fast"].get<int>(bayes); /* default to fast for Bayesian, otherwise slow */
+ whitepointR = params["whitepoint_r"].get<double>(0.0);
+ whitepointB = params["whitepoint_b"].get<double>(0.0);
+ if (bayes == false)
+ sensitivityR = sensitivityB = 1.0; /* the sensitivities make no sense either without the Bayesian algorithm */
+ return 0;
+}
+
+Awb::Awb(Controller *controller)
+ : AwbAlgorithm(controller)
+{
+ asyncAbort_ = asyncStart_ = asyncStarted_ = asyncFinished_ = false;
+ mode_ = nullptr;
+ manualR_ = manualB_ = 0.0;
+ asyncThread_ = std::thread(std::bind(&Awb::asyncFunc, this));
+}
+
+Awb::~Awb()
+{
+ {
+ std::lock_guard<std::mutex> lock(mutex_);
+ asyncAbort_ = true;
+ }
+ asyncSignal_.notify_one();
+ asyncThread_.join();
+}
+
+char const *Awb::name() const
+{
+ return NAME;
+}
+
+int Awb::read(const libcamera::YamlObject &params)
+{
+ return config_.read(params);
+}
+
+void Awb::initialise()
+{
+ frameCount_ = framePhase_ = 0;
+ /*
+ * Put something sane into the status that we are filtering towards,
+ * just in case the first few frames don't have anything meaningful in
+ * them.
+ */
+ if (!config_.ctR.empty() && !config_.ctB.empty()) {
+ syncResults_.temperatureK = config_.ctR.domain().clip(4000);
+ syncResults_.gainR = 1.0 / config_.ctR.eval(syncResults_.temperatureK);
+ syncResults_.gainG = 1.0;
+ syncResults_.gainB = 1.0 / config_.ctB.eval(syncResults_.temperatureK);
+ } else {
+ /* random values just to stop the world blowing up */
+ syncResults_.temperatureK = 4500;
+ syncResults_.gainR = syncResults_.gainG = syncResults_.gainB = 1.0;
+ }
+ prevSyncResults_ = syncResults_;
+ asyncResults_ = syncResults_;
+}
+
+void Awb::initialValues(double &gainR, double &gainB)
+{
+ gainR = syncResults_.gainR;
+ gainB = syncResults_.gainB;
+}
+
+void Awb::disableAuto()
+{
+ /* Freeze the most recent values, and treat them as manual gains */
+ manualR_ = syncResults_.gainR = prevSyncResults_.gainR;
+ manualB_ = syncResults_.gainB = prevSyncResults_.gainB;
+ syncResults_.gainG = prevSyncResults_.gainG;
+ syncResults_.temperatureK = prevSyncResults_.temperatureK;
+}
+
+void Awb::enableAuto()
+{
+ manualR_ = 0.0;
+ manualB_ = 0.0;
+}
+
+unsigned int Awb::getConvergenceFrames() const
+{
+ /*
+ * If not in auto mode, there is no convergence
+ * to happen, so no need to drop any frames - return zero.
+ */
+ if (!isAutoEnabled())
+ return 0;
+ else
+ return config_.convergenceFrames;
+}
+
+void Awb::setMode(std::string const &modeName)
+{
+ modeName_ = modeName;
+}
+
+void Awb::setManualGains(double manualR, double manualB)
+{
+ /* If any of these are 0.0, we switch back to auto. */
+ manualR_ = manualR;
+ manualB_ = manualB;
+ /*
+ * If not in auto mode, set these values into the syncResults which
+ * means that Prepare() will adopt them immediately.
+ */
+ if (!isAutoEnabled()) {
+ syncResults_.gainR = prevSyncResults_.gainR = manualR_;
+ syncResults_.gainG = prevSyncResults_.gainG = 1.0;
+ syncResults_.gainB = prevSyncResults_.gainB = manualB_;
+ if (config_.bayes) {
+ /* Also estimate the best corresponding colour temperature from the curves. */
+ double ctR = config_.ctRInverse.eval(config_.ctRInverse.domain().clip(1 / manualR_));
+ double ctB = config_.ctBInverse.eval(config_.ctBInverse.domain().clip(1 / manualB_));
+ prevSyncResults_.temperatureK = (ctR + ctB) / 2;
+ syncResults_.temperatureK = prevSyncResults_.temperatureK;
+ }
+ }
+}
+
+void Awb::switchMode([[maybe_unused]] CameraMode const &cameraMode,
+ Metadata *metadata)
+{
+ /* Let other algorithms know the current white balance values. */
+ metadata->set("awb.status", prevSyncResults_);
+}
+
+bool Awb::isAutoEnabled() const
+{
+ return manualR_ == 0.0 || manualB_ == 0.0;
+}
+
+void Awb::fetchAsyncResults()
+{
+ LOG(RPiAwb, Debug) << "Fetch AWB results";
+ asyncFinished_ = false;
+ asyncStarted_ = false;
+ /*
+ * It's possible manual gains could be set even while the async
+ * thread was running, so only copy the results if still in auto mode.
+ */
+ if (isAutoEnabled())
+ syncResults_ = asyncResults_;
+}
+
+void Awb::restartAsync(StatisticsPtr &stats, double lux)
+{
+ LOG(RPiAwb, Debug) << "Starting AWB calculation";
+ /* this makes a new reference which belongs to the asynchronous thread */
+ statistics_ = stats;
+ /* store the mode as it could technically change */
+ auto m = config_.modes.find(modeName_);
+ mode_ = m != config_.modes.end()
+ ? &m->second
+ : (mode_ == nullptr ? config_.defaultMode : mode_);
+ lux_ = lux;
+ framePhase_ = 0;
+ asyncStarted_ = true;
+ size_t len = modeName_.copy(asyncResults_.mode,
+ sizeof(asyncResults_.mode) - 1);
+ asyncResults_.mode[len] = '\0';
+ {
+ std::lock_guard<std::mutex> lock(mutex_);
+ asyncStart_ = true;
+ }
+ asyncSignal_.notify_one();
+}
+
+void Awb::prepare(Metadata *imageMetadata)
+{
+ if (frameCount_ < (int)config_.startupFrames)
+ frameCount_++;
+ double speed = frameCount_ < (int)config_.startupFrames
+ ? 1.0
+ : config_.speed;
+ LOG(RPiAwb, Debug)
+ << "frame_count " << frameCount_ << " speed " << speed;
+ {
+ std::unique_lock<std::mutex> lock(mutex_);
+ if (asyncStarted_ && asyncFinished_)
+ fetchAsyncResults();
+ }
+ /* Finally apply IIR filter to results and put into metadata. */
+ memcpy(prevSyncResults_.mode, syncResults_.mode,
+ sizeof(prevSyncResults_.mode));
+ prevSyncResults_.temperatureK = speed * syncResults_.temperatureK +
+ (1.0 - speed) * prevSyncResults_.temperatureK;
+ prevSyncResults_.gainR = speed * syncResults_.gainR +
+ (1.0 - speed) * prevSyncResults_.gainR;
+ prevSyncResults_.gainG = speed * syncResults_.gainG +
+ (1.0 - speed) * prevSyncResults_.gainG;
+ prevSyncResults_.gainB = speed * syncResults_.gainB +
+ (1.0 - speed) * prevSyncResults_.gainB;
+ imageMetadata->set("awb.status", prevSyncResults_);
+ LOG(RPiAwb, Debug)
+ << "Using AWB gains r " << prevSyncResults_.gainR << " g "
+ << prevSyncResults_.gainG << " b "
+ << prevSyncResults_.gainB;
+}
+
+void Awb::process(StatisticsPtr &stats, Metadata *imageMetadata)
+{
+ /* Count frames since we last poked the async thread. */
+ if (framePhase_ < (int)config_.framePeriod)
+ framePhase_++;
+ LOG(RPiAwb, Debug) << "frame_phase " << framePhase_;
+ /* We do not restart the async thread if we're not in auto mode. */
+ if (isAutoEnabled() &&
+ (framePhase_ >= (int)config_.framePeriod ||
+ frameCount_ < (int)config_.startupFrames)) {
+ /* Update any settings and any image metadata that we need. */
+ struct LuxStatus luxStatus = {};
+ luxStatus.lux = 400; /* in case no metadata */
+ if (imageMetadata->get("lux.status", luxStatus) != 0)
+ LOG(RPiAwb, Debug) << "No lux metadata found";
+ LOG(RPiAwb, Debug) << "Awb lux value is " << luxStatus.lux;
+
+ if (asyncStarted_ == false)
+ restartAsync(stats, luxStatus.lux);
+ }
+}
+
+void Awb::asyncFunc()
+{
+ while (true) {
+ {
+ std::unique_lock<std::mutex> lock(mutex_);
+ asyncSignal_.wait(lock, [&] {
+ return asyncStart_ || asyncAbort_;
+ });
+ asyncStart_ = false;
+ if (asyncAbort_)
+ break;
+ }
+ doAwb();
+ {
+ std::lock_guard<std::mutex> lock(mutex_);
+ asyncFinished_ = true;
+ }
+ syncSignal_.notify_one();
+ }
+}
+
+static void generateStats(std::vector<Awb::RGB> &zones,
+ StatisticsPtr &stats, double minPixels,
+ double minG, Metadata &globalMetadata)
+{
+ std::scoped_lock<RPiController::Metadata> l(globalMetadata);
+
+ for (unsigned int i = 0; i < stats->awbRegions.numRegions(); i++) {
+ Awb::RGB zone;
+ auto &region = stats->awbRegions.get(i);
+ if (region.counted >= minPixels) {
+ zone.G = region.val.gSum / region.counted;
+ if (zone.G < minG)
+ continue;
+ zone.R = region.val.rSum / region.counted;
+ zone.B = region.val.bSum / region.counted;
+ /* Factor in the ALSC applied colour shading correction if required. */
+ const AlscStatus *alscStatus = globalMetadata.getLocked<AlscStatus>("alsc.status");
+ if (stats->colourStatsPos == Statistics::ColourStatsPos::PreLsc && alscStatus) {
+ zone.R *= alscStatus->r[i];
+ zone.G *= alscStatus->g[i];
+ zone.B *= alscStatus->b[i];
+ }
+ zones.push_back(zone);
+ }
+ }
+}
+
+void Awb::prepareStats()
+{
+ zones_.clear();
+ /*
+ * LSC has already been applied to the stats in this pipeline, so stop
+ * any LSC compensation. We also ignore config_.fast in this version.
+ */
+ generateStats(zones_, statistics_, config_.minPixels,
+ config_.minG, getGlobalMetadata());
+ /*
+ * apply sensitivities, so values appear to come from our "canonical"
+ * sensor.
+ */
+ for (auto &zone : zones_) {
+ zone.R *= config_.sensitivityR;
+ zone.B *= config_.sensitivityB;
+ }
+}
+
+double Awb::computeDelta2Sum(double gainR, double gainB)
+{
+ /*
+ * Compute the sum of the squared colour error (non-greyness) as it
+ * appears in the log likelihood equation.
+ */
+ double delta2Sum = 0;
+ for (auto &z : zones_) {
+ double deltaR = gainR * z.R - 1 - config_.whitepointR;
+ double deltaB = gainB * z.B - 1 - config_.whitepointB;
+ double delta2 = deltaR * deltaR + deltaB * deltaB;
+ /* LOG(RPiAwb, Debug) << "deltaR " << deltaR << " deltaB " << deltaB << " delta2 " << delta2; */
+ delta2 = std::min(delta2, config_.deltaLimit);
+ delta2Sum += delta2;
+ }
+ return delta2Sum;
+}
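Each zone therefore contributes the squared distance of its gain-corrected (r, b) from the configured whitepoint, clamped at deltaLimit so a single strongly coloured zone cannot dominate the sum. A standalone sketch (not part of the patch) with two hypothetical zones, already divided by G as awbBayes() arranges, and the default whitepoint of 0 and deltaLimit of 0.2:

#include <algorithm>
#include <cassert>
#include <cmath>
#include <vector>

struct Zone { double r, b; }; /* chrominance values already divided by G */

static double delta2Sum(const std::vector<Zone> &zones, double gainR, double gainB,
			double whitepointR = 0.0, double whitepointB = 0.0,
			double deltaLimit = 0.2)
{
	double sum = 0.0;
	for (const Zone &z : zones) {
		double deltaR = gainR * z.r - 1 - whitepointR;
		double deltaB = gainB * z.b - 1 - whitepointB;
		sum += std::min(deltaR * deltaR + deltaB * deltaB, deltaLimit);
	}
	return sum;
}

int main()
{
	/* The first zone is perfectly grey under gains 2.0/1.6; the second is strongly coloured. */
	std::vector<Zone> zones{ { 0.5, 0.625 }, { 0.9, 0.2 } };

	/* Only the coloured zone contributes, and its error is clamped at deltaLimit. */
	assert(std::abs(delta2Sum(zones, 2.0, 1.6) - 0.2) < 1e-12);
	return 0;
}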
+
+Pwl Awb::interpolatePrior()
+{
+ /*
+ * Interpolate the prior log likelihood function for our current lux
+ * value.
+ */
+ if (lux_ <= config_.priors.front().lux)
+ return config_.priors.front().prior;
+ else if (lux_ >= config_.priors.back().lux)
+ return config_.priors.back().prior;
+ else {
+ int idx = 0;
+ /* find which two we lie between */
+ while (config_.priors[idx + 1].lux < lux_)
+ idx++;
+ double lux0 = config_.priors[idx].lux,
+ lux1 = config_.priors[idx + 1].lux;
+ return Pwl::combine(config_.priors[idx].prior,
+ config_.priors[idx + 1].prior,
+ [&](double /*x*/, double y0, double y1) {
+ return y0 + (y1 - y0) *
+ (lux_ - lux0) / (lux1 - lux0);
+ });
+ }
+}
+
+static double interpolateQuadatric(Pwl::Point const &a, Pwl::Point const &b,
+ Pwl::Point const &c)
+{
+ /*
+ * Given 3 points on a curve, find the extremum of the function in that
+ * interval by fitting a quadratic.
+ */
+ const double eps = 1e-3;
+ Pwl::Point ca = c - a, ba = b - a;
+ double denominator = 2 * (ba.y * ca.x - ca.y * ba.x);
+ if (abs(denominator) > eps) {
+ double numerator = ba.y * ca.x * ca.x - ca.y * ba.x * ba.x;
+ double result = numerator / denominator + a.x;
+ return std::max(a.x, std::min(c.x, result));
+ }
+ /* has degenerated to straight line segment */
+ return a.y < c.y - eps ? a.x : (c.y < a.y - eps ? c.x : b.x);
+}
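The helper above fits a parabola through the three samples and returns the x position of its extremum, clamped to the sampled interval; the fallback handles the near-degenerate, almost-collinear case. A standalone sketch (not part of the patch) re-doing the same arithmetic on a plain point struct and checking it against a known parabola:

#include <algorithm>
#include <cassert>
#include <cmath>

struct Point { double x, y; }; /* stand-in for Pwl::Point */

static double quadraticExtremum(Point a, Point b, Point c)
{
	const double eps = 1e-3;
	Point ca{ c.x - a.x, c.y - a.y }, ba{ b.x - a.x, b.y - a.y };
	double denominator = 2 * (ba.y * ca.x - ca.y * ba.x);
	if (std::abs(denominator) > eps) {
		double numerator = ba.y * ca.x * ca.x - ca.y * ba.x * ba.x;
		return std::clamp(numerator / denominator + a.x, a.x, c.x);
	}
	/* Degenerated to a straight line segment: pick the lower end. */
	return a.y < c.y - eps ? a.x : (c.y < a.y - eps ? c.x : b.x);
}

int main()
{
	/* Sampling y = (x - 2)^2 at x = 1, 2.5 and 4 recovers the minimum at x = 2. */
	double t = quadraticExtremum({ 1, 1 }, { 2.5, 0.25 }, { 4, 4 });
	assert(std::abs(t - 2.0) < 1e-9);
	return 0;
}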
+
+double Awb::coarseSearch(Pwl const &prior)
+{
+ points_.clear(); /* assume doesn't deallocate memory */
+ size_t bestPoint = 0;
+ double t = mode_->ctLo;
+ int spanR = 0, spanB = 0;
+ /* Step down the CT curve evaluating log likelihood. */
+ while (true) {
+ double r = config_.ctR.eval(t, &spanR);
+ double b = config_.ctB.eval(t, &spanB);
+ double gainR = 1 / r, gainB = 1 / b;
+ double delta2Sum = computeDelta2Sum(gainR, gainB);
+ double priorLogLikelihood = prior.eval(prior.domain().clip(t));
+ double finalLogLikelihood = delta2Sum - priorLogLikelihood;
+ LOG(RPiAwb, Debug)
+ << "t: " << t << " gain R " << gainR << " gain B "
+ << gainB << " delta2_sum " << delta2Sum
+ << " prior " << priorLogLikelihood << " final "
+ << finalLogLikelihood;
+ points_.push_back(Pwl::Point(t, finalLogLikelihood));
+ if (points_.back().y < points_[bestPoint].y)
+ bestPoint = points_.size() - 1;
+ if (t == mode_->ctHi)
+ break;
+ /* scale the CT step by the current t, for roughly even steps along the r/b curve */
+ t = std::min(t + t / 10 * config_.coarseStep, mode_->ctHi);
+ }
+ t = points_[bestPoint].x;
+ LOG(RPiAwb, Debug) << "Coarse search found CT " << t;
+ /*
+ * We have the best point of the search, but refine it with a quadratic
+ * interpolation around its neighbours.
+ */
+ if (points_.size() > 2) {
+ unsigned long bp = std::min(bestPoint, points_.size() - 2);
+ bestPoint = std::max(1UL, bp);
+ t = interpolateQuadatric(points_[bestPoint - 1],
+ points_[bestPoint],
+ points_[bestPoint + 1]);
+ LOG(RPiAwb, Debug)
+ << "After quadratic refinement, coarse search has CT "
+ << t;
+ }
+ return t;
+}
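Because each step is a fixed fraction of the current colour temperature (coarseStep / 10, i.e. 2% with the default coarse_step of 0.2), the samples are spaced geometrically in CT. A standalone sketch (not part of the patch) counting the evaluations over a hypothetical 2500K-8000K search range:

#include <algorithm>
#include <iostream>

int main()
{
	const double coarseStep = 0.2; /* default coarse_step, as read above */
	const double ctLo = 2500, ctHi = 8000; /* hypothetical AWB mode search range */

	int samples = 0;
	double t = ctLo;
	while (true) {
		samples++; /* one log likelihood evaluation at this CT */
		if (t == ctHi)
			break;
		/* Same update as coarseSearch(): ~2% of the current t per step. */
		t = std::min(t + t / 10 * coarseStep, ctHi);
	}

	/* Roughly log(8000 / 2500) / log(1.02) + 1, i.e. about 60 samples. */
	std::cout << samples << " CT samples\n";
	return 0;
}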
+
+void Awb::fineSearch(double &t, double &r, double &b, Pwl const &prior)
+{
+ int spanR = -1, spanB = -1;
+ config_.ctR.eval(t, &spanR);
+ config_.ctB.eval(t, &spanB);
+ double step = t / 10 * config_.coarseStep * 0.1;
+ int nsteps = 5;
+ double rDiff = config_.ctR.eval(t + nsteps * step, &spanR) -
+ config_.ctR.eval(t - nsteps * step, &spanR);
+ double bDiff = config_.ctB.eval(t + nsteps * step, &spanB) -
+ config_.ctB.eval(t - nsteps * step, &spanB);
+ Pwl::Point transverse(bDiff, -rDiff);
+ if (transverse.len2() < 1e-6)
+ return;
+ /*
+ * unit vector orthogonal to the b vs. r function (pointing outwards
+ * with r and b increasing)
+ */
+ transverse = transverse / transverse.len();
+ double bestLogLikelihood = 0, bestT = 0, bestR = 0, bestB = 0;
+ double transverseRange = config_.transverseNeg + config_.transversePos;
+ const int maxNumDeltas = 12;
+ /* a transverse step approximately every 0.01 r/b units */
+ int numDeltas = floor(transverseRange * 100 + 0.5) + 1;
+ numDeltas = numDeltas < 3 ? 3 : (numDeltas > maxNumDeltas ? maxNumDeltas : numDeltas);
+ /*
+ * Step down CT curve. March a bit further if the transverse range is
+ * large.
+ */
+ nsteps += numDeltas;
+ for (int i = -nsteps; i <= nsteps; i++) {
+ double tTest = t + i * step;
+ double priorLogLikelihood =
+ prior.eval(prior.domain().clip(tTest));
+ double rCurve = config_.ctR.eval(tTest, &spanR);
+ double bCurve = config_.ctB.eval(tTest, &spanB);
+ /* x will be distance off the curve, y the log likelihood there */
+ Pwl::Point points[maxNumDeltas];
+ int bestPoint = 0;
+ /* Take some measurements transversely *off* the CT curve. */
+ for (int j = 0; j < numDeltas; j++) {
+ points[j].x = -config_.transverseNeg +
+ (transverseRange * j) / (numDeltas - 1);
+ Pwl::Point rbTest = Pwl::Point(rCurve, bCurve) +
+ transverse * points[j].x;
+ double rTest = rbTest.x, bTest = rbTest.y;
+ double gainR = 1 / rTest, gainB = 1 / bTest;
+ double delta2Sum = computeDelta2Sum(gainR, gainB);
+ points[j].y = delta2Sum - priorLogLikelihood;
+ LOG(RPiAwb, Debug)
+ << "At t " << tTest << " r " << rTest << " b "
+ << bTest << ": " << points[j].y;
+ if (points[j].y < points[bestPoint].y)
+ bestPoint = j;
+ }
+ /*
+ * We have numDeltas points transversely across the CT curve,
+ * now let's do a quadratic interpolation for the best result.
+ */
+ bestPoint = std::max(1, std::min(bestPoint, numDeltas - 2));
+ Pwl::Point rbTest = Pwl::Point(rCurve, bCurve) +
+ transverse * interpolateQuadatric(points[bestPoint - 1],
+ points[bestPoint],
+ points[bestPoint + 1]);
+ double rTest = rbTest.x, bTest = rbTest.y;
+ double gainR = 1 / rTest, gainB = 1 / bTest;
+ double delta2Sum = computeDelta2Sum(gainR, gainB);
+ double finalLogLikelihood = delta2Sum - priorLogLikelihood;
+ LOG(RPiAwb, Debug)
+ << "Finally "
+ << tTest << " r " << rTest << " b " << bTest << ": "
+ << finalLogLikelihood
+ << (finalLogLikelihood < bestLogLikelihood ? " BEST" : "");
+ if (bestT == 0 || finalLogLikelihood < bestLogLikelihood)
+ bestLogLikelihood = finalLogLikelihood,
+ bestT = tTest, bestR = rTest, bestB = bTest;
+ }
+ t = bestT, r = bestR, b = bestB;
+ LOG(RPiAwb, Debug)
+ << "Fine search found t " << t << " r " << r << " b " << b;
+}
+
+void Awb::awbBayes()
+{
+ /*
+ * May as well divide out G to save computeDelta2Sum from doing it over
+ * and over.
+ */
+ for (auto &z : zones_)
+ z.R = z.R / (z.G + 1), z.B = z.B / (z.G + 1);
+ /*
+ * Get the current prior, and scale according to how many zones are
+ * valid... not entirely sure about this.
+ */
+ Pwl prior = interpolatePrior();
+ prior *= zones_.size() / (double)(statistics_->awbRegions.numRegions());
+ prior.map([](double x, double y) {
+ LOG(RPiAwb, Debug) << "(" << x << "," << y << ")";
+ });
+ double t = coarseSearch(prior);
+ double r = config_.ctR.eval(t);
+ double b = config_.ctB.eval(t);
+ LOG(RPiAwb, Debug)
+ << "After coarse search: r " << r << " b " << b << " (gains r "
+ << 1 / r << " b " << 1 / b << ")";
+ /*
+ * Not entirely sure how to handle the fine search yet. Mostly the
+ * estimated CT is already good enough, but the fine search allows us to
+ * wander transversely off the CT curve. Under some illuminants, where
+ * there may be more or less green light, this may prove beneficial,
+ * though I probably need more real datasets before deciding exactly how
+ * this should be controlled and tuned.
+ */
+ fineSearch(t, r, b, prior);
+ LOG(RPiAwb, Debug)
+ << "After fine search: r " << r << " b " << b << " (gains r "
+ << 1 / r << " b " << 1 / b << ")";
+ /*
+ * Write results out for the main thread to pick up. Remember to adjust
+ * the gains from the ones that the "canonical sensor" would require to
+ * the ones needed by *this* sensor.
+ */
+ asyncResults_.temperatureK = t;
+ asyncResults_.gainR = 1.0 / r * config_.sensitivityR;
+ asyncResults_.gainG = 1.0;
+ asyncResults_.gainB = 1.0 / b * config_.sensitivityB;
+}
+
+void Awb::awbGrey()
+{
+ LOG(RPiAwb, Debug) << "Grey world AWB";
+ /*
+ * Make a separate list of the derivatives for each of red and blue, so
+ * that we can sort them to exclude the extreme gains. We could
+ * consider some variations, such as normalising all the zones first, or
+ * doing an L2 average etc.
+ */
+ std::vector<RGB> &derivsR(zones_);
+ std::vector<RGB> derivsB(derivsR);
+ std::sort(derivsR.begin(), derivsR.end(),
+ [](RGB const &a, RGB const &b) {
+ return a.G * b.R < b.G * a.R;
+ });
+ std::sort(derivsB.begin(), derivsB.end(),
+ [](RGB const &a, RGB const &b) {
+ return a.G * b.B < b.G * a.B;
+ });
+ /* Average the middle half of the values. */
+ int discard = derivsR.size() / 4;
+ RGB sumR(0, 0, 0), sumB(0, 0, 0);
+ for (auto ri = derivsR.begin() + discard,
+ bi = derivsB.begin() + discard;
+ ri != derivsR.end() - discard; ri++, bi++)
+ sumR += *ri, sumB += *bi;
+ double gainR = sumR.G / (sumR.R + 1),
+ gainB = sumB.G / (sumB.B + 1);
+ asyncResults_.temperatureK = 4500; /* don't know what it is */
+ asyncResults_.gainR = gainR;
+ asyncResults_.gainG = 1.0;
+ asyncResults_.gainB = gainB;
+}
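The grey-world estimate above is therefore a trimmed one: zones are ordered by their G/R (and, separately, G/B) ratios, the top and bottom quarters are discarded, and the gain is the ratio of summed G to summed R (or B) over the remaining middle half (the +1 in the denominator merely guards against a zero sum). A standalone sketch (not part of the patch) of the red gain only, with hypothetical zone statistics:

#include <algorithm>
#include <cassert>
#include <cmath>
#include <vector>

struct RGB { double R, G, B; };

/* Same trimmed grey-world estimate as awbGrey() above, red gain only. */
static double greyWorldGainR(std::vector<RGB> zones)
{
	std::sort(zones.begin(), zones.end(),
		  [](RGB const &a, RGB const &b) { return a.G * b.R < b.G * a.R; });
	size_t discard = zones.size() / 4;
	double sumR = 0, sumG = 0;
	for (size_t i = discard; i < zones.size() - discard; i++)
		sumR += zones[i].R, sumG += zones[i].G;
	return sumG / (sumR + 1);
}

int main()
{
	/* Hypothetical zones: mostly grey-ish, with one red and one blue outlier. */
	std::vector<RGB> zones{
		{ 200, 400, 210 }, { 210, 400, 190 }, { 190, 410, 200 }, { 205, 395, 205 },
		{ 600, 400, 100 }, /* red outlier */
		{ 80, 400, 600 },  /* blue outlier */
		{ 195, 405, 200 }, { 200, 400, 200 },
	};

	/* The outliers fall in the discarded quarters, so the gain stays near 400/200 = 2. */
	assert(std::abs(greyWorldGainR(zones) - 2.0) < 0.1);
	return 0;
}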
+
+void Awb::doAwb()
+{
+ prepareStats();
+ LOG(RPiAwb, Debug) << "Valid zones: " << zones_.size();
+ if (zones_.size() > config_.minRegions) {
+ if (config_.bayes)
+ awbBayes();
+ else
+ awbGrey();
+ LOG(RPiAwb, Debug)
+ << "CT found is "
+ << asyncResults_.temperatureK
+ << " with gains r " << asyncResults_.gainR
+ << " and b " << asyncResults_.gainB;
+ }
+ /*
+ * we're done with these; we may as well relinquish our hold on the
+ * pointer.
+ */
+ statistics_.reset();
+}
+
+/* Register algorithm with the system. */
+static Algorithm *create(Controller *controller)
+{
+ return (Algorithm *)new Awb(controller);
+}
+static RegisterAlgorithm reg(NAME, &create);
diff --git a/src/ipa/rpi/controller/rpi/awb.h b/src/ipa/rpi/controller/rpi/awb.h
new file mode 100644
index 00000000..499b4519
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/awb.h
@@ -0,0 +1,192 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * AWB control algorithm
+ */
+#pragma once
+
+#include <mutex>
+#include <condition_variable>
+#include <thread>
+
+#include "../awb_algorithm.h"
+#include "../pwl.h"
+#include "../awb_status.h"
+#include "../statistics.h"
+
+namespace RPiController {
+
+/* Control algorithm to perform AWB calculations. */
+
+struct AwbMode {
+ int read(const libcamera::YamlObject &params);
+ double ctLo; /* low CT value for search */
+ double ctHi; /* high CT value for search */
+};
+
+struct AwbPrior {
+ int read(const libcamera::YamlObject &params);
+ double lux; /* lux level */
+ Pwl prior; /* maps CT to prior log likelihood for this lux level */
+};
+
+struct AwbConfig {
+ AwbConfig() : defaultMode(nullptr) {}
+ int read(const libcamera::YamlObject &params);
+ /* Only repeat the AWB calculation every "this many" frames */
+ uint16_t framePeriod;
+ /* number of initial frames for which speed taken as 1.0 (maximum) */
+ uint16_t startupFrames;
+ unsigned int convergenceFrames; /* approx number of frames to converge */
+ double speed; /* IIR filter speed applied to algorithm results */
+ bool fast; /* "fast" mode uses a 16x16 rather than 32x32 grid */
+ Pwl ctR; /* function maps CT to r (= R/G) */
+ Pwl ctB; /* function maps CT to b (= B/G) */
+ Pwl ctRInverse; /* inverse of ctR */
+ Pwl ctBInverse; /* inverse of ctB */
+ /* table of illuminant priors at different lux levels */
+ std::vector<AwbPrior> priors;
+ /* AWB "modes" (determines the search range) */
+ std::map<std::string, AwbMode> modes;
+ AwbMode *defaultMode; /* mode used if no mode selected */
+ /*
+ * minimum proportion of pixels counted within AWB region for it to be
+ * "useful"
+ */
+ double minPixels;
+ /* minimum G value of those pixels, to be regarded as "useful" */
+ uint16_t minG;
+ /*
+ * number of AWB regions that must be "useful" in order to do the AWB
+ * calculation
+ */
+ uint32_t minRegions;
+ /* clamp on colour error term (so as not to penalise non-grey excessively) */
+ double deltaLimit;
+ /* step size control in coarse search */
+ double coarseStep;
+ /* how far to wander off CT curve towards "more purple" */
+ double transversePos;
+ /* how far to wander off CT curve towards "more green" */
+ double transverseNeg;
+ /*
+ * red sensitivity ratio (set to canonical sensor's R/G divided by this
+ * sensor's R/G)
+ */
+ double sensitivityR;
+ /*
+ * blue sensitivity ratio (set to canonical sensor's B/G divided by this
+ * sensor's B/G)
+ */
+ double sensitivityB;
+ /* The whitepoint (which we normally "aim" for) can be moved. */
+ double whitepointR;
+ double whitepointB;
+ bool bayes; /* use Bayesian algorithm */
+};
+
+class Awb : public AwbAlgorithm
+{
+public:
+ Awb(Controller *controller = NULL);
+ ~Awb();
+ char const *name() const override;
+ void initialise() override;
+ int read(const libcamera::YamlObject &params) override;
+ unsigned int getConvergenceFrames() const override;
+ void initialValues(double &gainR, double &gainB) override;
+ void setMode(std::string const &name) override;
+ void setManualGains(double manualR, double manualB) override;
+ void enableAuto() override;
+ void disableAuto() override;
+ void switchMode(CameraMode const &cameraMode, Metadata *metadata) override;
+ void prepare(Metadata *imageMetadata) override;
+ void process(StatisticsPtr &stats, Metadata *imageMetadata) override;
+ struct RGB {
+ RGB(double r = 0, double g = 0, double b = 0)
+ : R(r), G(g), B(b)
+ {
+ }
+ double R, G, B;
+ RGB &operator+=(RGB const &other)
+ {
+ R += other.R, G += other.G, B += other.B;
+ return *this;
+ }
+ };
+
+private:
+ bool isAutoEnabled() const;
+ /* configuration is read-only, and available to both threads */
+ AwbConfig config_;
+ std::thread asyncThread_;
+ void asyncFunc(); /* asynchronous thread function */
+ std::mutex mutex_;
+ /* condvar for async thread to wait on */
+ std::condition_variable asyncSignal_;
+ /* condvar for synchronous thread to wait on */
+ std::condition_variable syncSignal_;
+ /* for sync thread to check if async thread finished (requires mutex) */
+ bool asyncFinished_;
+ /* for async thread to check if it's been told to run (requires mutex) */
+ bool asyncStart_;
+ /* for async thread to check if it's been told to quit (requires mutex) */
+ bool asyncAbort_;
+
+ /*
+ * The following are only for the synchronous thread to use:
+ * for sync thread to note it has asked async thread to run
+ */
+ bool asyncStarted_;
+ /* counts up to framePeriod before restarting the async thread */
+ int framePhase_;
+ int frameCount_; /* counts up to startup_frames */
+ AwbStatus syncResults_;
+ AwbStatus prevSyncResults_;
+ std::string modeName_;
+ /*
+ * The following are for the asynchronous thread to use, though the main
+ * thread can set/reset them if the async thread is known to be idle:
+ */
+ void restartAsync(StatisticsPtr &stats, double lux);
+ /* copy out the results from the async thread so that it can be restarted */
+ void fetchAsyncResults();
+ StatisticsPtr statistics_;
+ AwbMode *mode_;
+ double lux_;
+ AwbStatus asyncResults_;
+ void doAwb();
+ void awbBayes();
+ void awbGrey();
+ void prepareStats();
+ double computeDelta2Sum(double gainR, double gainB);
+ Pwl interpolatePrior();
+ double coarseSearch(Pwl const &prior);
+ void fineSearch(double &t, double &r, double &b, Pwl const &prior);
+ std::vector<RGB> zones_;
+ std::vector<Pwl::Point> points_;
+ /* manual r setting */
+ double manualR_;
+ /* manual b setting */
+ double manualB_;
+};
+
+static inline Awb::RGB operator+(Awb::RGB const &a, Awb::RGB const &b)
+{
+ return Awb::RGB(a.R + b.R, a.G + b.G, a.B + b.B);
+}
+static inline Awb::RGB operator-(Awb::RGB const &a, Awb::RGB const &b)
+{
+ return Awb::RGB(a.R - b.R, a.G - b.G, a.B - b.B);
+}
+static inline Awb::RGB operator*(double d, Awb::RGB const &rgb)
+{
+ return Awb::RGB(d * rgb.R, d * rgb.G, d * rgb.B);
+}
+static inline Awb::RGB operator*(Awb::RGB const &rgb, double d)
+{
+ return d * rgb;
+}
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/rpi/black_level.cpp b/src/ipa/rpi/controller/rpi/black_level.cpp
new file mode 100644
index 00000000..ea991df9
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/black_level.cpp
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * black level control algorithm
+ */
+
+#include <math.h>
+#include <stdint.h>
+
+#include <libcamera/base/log.h>
+
+#include "../black_level_status.h"
+
+#include "black_level.h"
+
+using namespace RPiController;
+using namespace libcamera;
+
+LOG_DEFINE_CATEGORY(RPiBlackLevel)
+
+#define NAME "rpi.black_level"
+
+BlackLevel::BlackLevel(Controller *controller)
+ : BlackLevelAlgorithm(controller)
+{
+}
+
+char const *BlackLevel::name() const
+{
+ return NAME;
+}
+
+int BlackLevel::read(const libcamera::YamlObject &params)
+{
+ /* 64 in 10 bits scaled to 16 bits */
+ uint16_t blackLevel = params["black_level"].get<uint16_t>(4096);
+ blackLevelR_ = params["black_level_r"].get<uint16_t>(blackLevel);
+ blackLevelG_ = params["black_level_g"].get<uint16_t>(blackLevel);
+ blackLevelB_ = params["black_level_b"].get<uint16_t>(blackLevel);
+ LOG(RPiBlackLevel, Debug)
+ << " Read black levels red " << blackLevelR_
+ << " green " << blackLevelG_
+ << " blue " << blackLevelB_;
+ return 0;
+}
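The default of 4096 is simply the conventional black level of 64 on a 10-bit sensor re-expressed on the 16-bit scale used by the status (64 x 2^(16-10)). A tiny sketch of that conversion, for illustration only, since the tuning values read above are already given in 16-bit units:

#include <cstdint>

/* Scale a black level from the sensor's native bit depth to the 16-bit range. */
static constexpr uint16_t blackLevelTo16Bit(uint16_t level, unsigned int bitDepth)
{
	return level << (16 - bitDepth);
}

static_assert(blackLevelTo16Bit(64, 10) == 4096, "64 in 10 bits is 4096 in 16 bits");
static_assert(blackLevelTo16Bit(256, 12) == 4096, "256 in 12 bits is the same level");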
+
+void BlackLevel::initialValues(uint16_t &blackLevelR, uint16_t &blackLevelG,
+ uint16_t &blackLevelB)
+{
+ blackLevelR = blackLevelR_;
+ blackLevelG = blackLevelG_;
+ blackLevelB = blackLevelB_;
+}
+
+void BlackLevel::prepare(Metadata *imageMetadata)
+{
+ /*
+ * Possibly we should think about doing this in a switchMode or
+ * something?
+ */
+ struct BlackLevelStatus status;
+ status.blackLevelR = blackLevelR_;
+ status.blackLevelG = blackLevelG_;
+ status.blackLevelB = blackLevelB_;
+ imageMetadata->set("black_level.status", status);
+}
+
+/* Register algorithm with the system. */
+static Algorithm *create(Controller *controller)
+{
+ return new BlackLevel(controller);
+}
+static RegisterAlgorithm reg(NAME, &create);
diff --git a/src/ipa/rpi/controller/rpi/black_level.h b/src/ipa/rpi/controller/rpi/black_level.h
new file mode 100644
index 00000000..f50729db
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/black_level.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * black level control algorithm
+ */
+#pragma once
+
+#include "../black_level_algorithm.h"
+#include "../black_level_status.h"
+
+/* This is our implementation of the "black level algorithm". */
+
+namespace RPiController {
+
+class BlackLevel : public BlackLevelAlgorithm
+{
+public:
+ BlackLevel(Controller *controller);
+ char const *name() const override;
+ int read(const libcamera::YamlObject &params) override;
+ void initialValues(uint16_t &blackLevelR, uint16_t &blackLevelG,
+ uint16_t &blackLevelB) override;
+ void prepare(Metadata *imageMetadata) override;
+
+private:
+ double blackLevelR_;
+ double blackLevelG_;
+ double blackLevelB_;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/rpi/cac.cpp b/src/ipa/rpi/controller/rpi/cac.cpp
new file mode 100644
index 00000000..17779ad5
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/cac.cpp
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2023 Raspberry Pi Ltd
+ *
+ * Chromatic Aberration Correction algorithm
+ */
+#include "cac.h"
+
+#include <libcamera/base/log.h>
+
+#include "cac_status.h"
+
+using namespace RPiController;
+using namespace libcamera;
+
+LOG_DEFINE_CATEGORY(RPiCac)
+
+#define NAME "rpi.cac"
+
+Cac::Cac(Controller *controller)
+ : Algorithm(controller)
+{
+}
+
+char const *Cac::name() const
+{
+ return NAME;
+}
+
+static bool arrayToSet(const libcamera::YamlObject &params, std::vector<double> &inputArray, const Size &size)
+{
+ int num = 0;
+ int max_num = (size.width + 1) * (size.height + 1);
+ inputArray.resize(max_num);
+
+ for (const auto &p : params.asList()) {
+ if (num == max_num)
+ return false;
+ inputArray[num++] = p.get<double>(0);
+ }
+
+ return num == max_num;
+}
+
+static void setStrength(std::vector<double> &inputArray, std::vector<double> &outputArray,
+ double strengthFactor)
+{
+ int num = 0;
+ for (const auto &p : inputArray) {
+ outputArray[num++] = p * strengthFactor;
+ }
+}
+
+int Cac::read(const libcamera::YamlObject &params)
+{
+ config_.enabled = params.contains("lut_rx") && params.contains("lut_ry") &&
+ params.contains("lut_bx") && params.contains("lut_by");
+ if (!config_.enabled)
+ return 0;
+
+ const Size &size = getHardwareConfig().cacRegions;
+
+ if (!arrayToSet(params["lut_rx"], config_.lutRx, size)) {
+ LOG(RPiCac, Error) << "Bad CAC lut_rx table";
+ return -EINVAL;
+ }
+
+ if (!arrayToSet(params["lut_ry"], config_.lutRy, size)) {
+ LOG(RPiCac, Error) << "Bad CAC lut_ry table";
+ return -EINVAL;
+ }
+
+ if (!arrayToSet(params["lut_bx"], config_.lutBx, size)) {
+ LOG(RPiCac, Error) << "Bad CAC lut_bx table";
+ return -EINVAL;
+ }
+
+ if (!arrayToSet(params["lut_by"], config_.lutBy, size)) {
+ LOG(RPiCac, Error) << "Bad CAC lut_by table";
+ return -EINVAL;
+ }
+
+ double strength = params["strength"].get<double>(1);
+ cacStatus_.lutRx = config_.lutRx;
+ cacStatus_.lutRy = config_.lutRy;
+ cacStatus_.lutBx = config_.lutBx;
+ cacStatus_.lutBy = config_.lutBy;
+ setStrength(config_.lutRx, cacStatus_.lutRx, strength);
+ setStrength(config_.lutBx, cacStatus_.lutBx, strength);
+ setStrength(config_.lutRy, cacStatus_.lutRy, strength);
+ setStrength(config_.lutBy, cacStatus_.lutBy, strength);
+
+ return 0;
+}
+
+void Cac::prepare(Metadata *imageMetadata)
+{
+ if (config_.enabled)
+ imageMetadata->set("cac.status", cacStatus_);
+}
+
+// Register algorithm with the system.
+static Algorithm *Create(Controller *controller)
+{
+ return (Algorithm *)new Cac(controller);
+}
+static RegisterAlgorithm reg(NAME, &Create);
diff --git a/src/ipa/rpi/controller/rpi/cac.h b/src/ipa/rpi/controller/rpi/cac.h
new file mode 100644
index 00000000..a7b14c00
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/cac.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2023, Raspberry Pi Ltd
+ *
+ * CAC (Chromatic Aberration Correction) control algorithm
+ */
+#pragma once
+
+#include "algorithm.h"
+#include "cac_status.h"
+
+namespace RPiController {
+
+struct CacConfig {
+ bool enabled;
+ std::vector<double> lutRx;
+ std::vector<double> lutRy;
+ std::vector<double> lutBx;
+ std::vector<double> lutBy;
+};
+
+class Cac : public Algorithm
+{
+public:
+ Cac(Controller *controller = NULL);
+ char const *name() const override;
+ int read(const libcamera::YamlObject &params) override;
+ void prepare(Metadata *imageMetadata) override;
+
+private:
+ CacConfig config_;
+ CacStatus cacStatus_;
+};
+
+} // namespace RPiController
diff --git a/src/ipa/rpi/controller/rpi/ccm.cpp b/src/ipa/rpi/controller/rpi/ccm.cpp
new file mode 100644
index 00000000..c5588029
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/ccm.cpp
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * CCM (colour correction matrix) control algorithm
+ */
+
+#include <libcamera/base/log.h>
+
+#include "../awb_status.h"
+#include "../ccm_status.h"
+#include "../lux_status.h"
+#include "../metadata.h"
+
+#include "ccm.h"
+
+using namespace RPiController;
+using namespace libcamera;
+
+LOG_DEFINE_CATEGORY(RPiCcm)
+
+/*
+ * This algorithm selects a CCM (Colour Correction Matrix) according to the
+ * colour temperature estimated by AWB (interpolating between known matrices as
+ * necessary). Additionally the amount of colour saturation can be controlled
+ * both according to the current estimated lux level and according to a
+ * saturation setting that is exposed to applications.
+ */
+
+#define NAME "rpi.ccm"
+
+Matrix::Matrix()
+{
+ memset(m, 0, sizeof(m));
+}
+Matrix::Matrix(double m0, double m1, double m2, double m3, double m4, double m5,
+ double m6, double m7, double m8)
+{
+ m[0][0] = m0, m[0][1] = m1, m[0][2] = m2, m[1][0] = m3, m[1][1] = m4,
+ m[1][2] = m5, m[2][0] = m6, m[2][1] = m7, m[2][2] = m8;
+}
+int Matrix::read(const libcamera::YamlObject &params)
+{
+ double *ptr = (double *)m;
+
+ if (params.size() != 9) {
+ LOG(RPiCcm, Error) << "Wrong number of values in CCM";
+ return -EINVAL;
+ }
+
+ for (const auto &param : params.asList()) {
+ auto value = param.get<double>();
+ if (!value)
+ return -EINVAL;
+ *ptr++ = *value;
+ }
+
+ return 0;
+}
+
+Ccm::Ccm(Controller *controller)
+ : CcmAlgorithm(controller), saturation_(1.0) {}
+
+char const *Ccm::name() const
+{
+ return NAME;
+}
+
+int Ccm::read(const libcamera::YamlObject &params)
+{
+ int ret;
+
+ if (params.contains("saturation")) {
+ ret = config_.saturation.read(params["saturation"]);
+ if (ret)
+ return ret;
+ }
+
+ for (auto &p : params["ccms"].asList()) {
+ auto value = p["ct"].get<double>();
+ if (!value)
+ return -EINVAL;
+
+ CtCcm ctCcm;
+ ctCcm.ct = *value;
+ ret = ctCcm.ccm.read(p["ccm"]);
+ if (ret)
+ return ret;
+
+ if (!config_.ccms.empty() && ctCcm.ct <= config_.ccms.back().ct) {
+ LOG(RPiCcm, Error)
+ << "CCM not in increasing colour temperature order";
+ return -EINVAL;
+ }
+
+ config_.ccms.push_back(std::move(ctCcm));
+ }
+
+ if (config_.ccms.empty()) {
+ LOG(RPiCcm, Error) << "No CCMs specified";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void Ccm::setSaturation(double saturation)
+{
+ saturation_ = saturation;
+}
+
+void Ccm::initialise()
+{
+}
+
+template<typename T>
+static bool getLocked(Metadata *metadata, std::string const &tag, T &value)
+{
+ T *ptr = metadata->getLocked<T>(tag);
+ if (ptr == nullptr)
+ return false;
+ value = *ptr;
+ return true;
+}
+
+Matrix calculateCcm(std::vector<CtCcm> const &ccms, double ct)
+{
+ if (ct <= ccms.front().ct)
+ return ccms.front().ccm;
+ else if (ct >= ccms.back().ct)
+ return ccms.back().ccm;
+ else {
+ int i = 0;
+ for (; ct > ccms[i].ct; i++)
+ ;
+ double lambda =
+ (ct - ccms[i - 1].ct) / (ccms[i].ct - ccms[i - 1].ct);
+ return lambda * ccms[i].ccm + (1.0 - lambda) * ccms[i - 1].ccm;
+ }
+}
+
+Matrix applySaturation(Matrix const &ccm, double saturation)
+{
+ Matrix RGB2Y(0.299, 0.587, 0.114, -0.169, -0.331, 0.500, 0.500, -0.419,
+ -0.081);
+ Matrix Y2RGB(1.000, 0.000, 1.402, 1.000, -0.345, -0.714, 1.000, 1.771,
+ 0.000);
+ Matrix S(1, 0, 0, 0, saturation, 0, 0, 0, saturation);
+ return Y2RGB * S * RGB2Y * ccm;
+}
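applySaturation() transforms the matrix through an approximate BT.601 RGB to Y'CbCr conversion, scales the two chroma rows by the saturation factor and converts back, so a saturation of 1 leaves the CCM essentially untouched while a saturation of 0 maps every colour to its luma. A standalone sketch (not part of the patch, identity CCM assumed) verifying the desaturation case with the same rounded coefficients:

#include <array>
#include <cassert>
#include <cmath>

using Mat3 = std::array<std::array<double, 3>, 3>;

static Mat3 mul(const Mat3 &a, const Mat3 &b)
{
	Mat3 r{};
	for (int i = 0; i < 3; i++)
		for (int j = 0; j < 3; j++)
			for (int k = 0; k < 3; k++)
				r[i][j] += a[i][k] * b[k][j];
	return r;
}

int main()
{
	/* The same (rounded) RGB <-> Y'CbCr coefficients as applySaturation() above. */
	Mat3 rgb2y{ { { 0.299, 0.587, 0.114 },
		      { -0.169, -0.331, 0.500 },
		      { 0.500, -0.419, -0.081 } } };
	Mat3 y2rgb{ { { 1.000, 0.000, 1.402 },
		      { 1.000, -0.345, -0.714 },
		      { 1.000, 1.771, 0.000 } } };

	/* Saturation 0: zero out the Cb/Cr rows entirely. */
	Mat3 s{ { { 1, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } } };
	Mat3 desat = mul(y2rgb, mul(s, rgb2y));

	/* Pure red in, luma-only grey out: every output channel equals Y = 0.299. */
	for (int i = 0; i < 3; i++)
		assert(std::abs(desat[i][0] - 0.299) < 1e-12);
	return 0;
}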
+
+void Ccm::prepare(Metadata *imageMetadata)
+{
+ bool awbOk = false, luxOk = false;
+ struct AwbStatus awb = {};
+ awb.temperatureK = 4000; /* in case no metadata */
+ struct LuxStatus lux = {};
+ lux.lux = 400; /* in case no metadata */
+ {
+ /* grab mutex just once to get everything */
+ std::lock_guard<Metadata> lock(*imageMetadata);
+ awbOk = getLocked(imageMetadata, "awb.status", awb);
+ luxOk = getLocked(imageMetadata, "lux.status", lux);
+ }
+ if (!awbOk)
+ LOG(RPiCcm, Warning) << "no colour temperature found";
+ if (!luxOk)
+ LOG(RPiCcm, Warning) << "no lux value found";
+ Matrix ccm = calculateCcm(config_.ccms, awb.temperatureK);
+ double saturation = saturation_;
+ struct CcmStatus ccmStatus;
+ ccmStatus.saturation = saturation;
+ if (!config_.saturation.empty())
+ saturation *= config_.saturation.eval(
+ config_.saturation.domain().clip(lux.lux));
+ ccm = applySaturation(ccm, saturation);
+ for (int j = 0; j < 3; j++)
+ for (int i = 0; i < 3; i++)
+ ccmStatus.matrix[j * 3 + i] =
+ std::max(-8.0, std::min(7.9999, ccm.m[j][i]));
+ LOG(RPiCcm, Debug)
+ << "colour temperature " << awb.temperatureK << "K";
+ LOG(RPiCcm, Debug)
+ << "CCM: " << ccmStatus.matrix[0] << " " << ccmStatus.matrix[1]
+ << " " << ccmStatus.matrix[2] << " "
+ << ccmStatus.matrix[3] << " " << ccmStatus.matrix[4]
+ << " " << ccmStatus.matrix[5] << " "
+ << ccmStatus.matrix[6] << " " << ccmStatus.matrix[7]
+ << " " << ccmStatus.matrix[8];
+ imageMetadata->set("ccm.status", ccmStatus);
+}
+
+/* Register algorithm with the system. */
+static Algorithm *create(Controller *controller)
+{
+ return (Algorithm *)new Ccm(controller);
+ ;
+}
+static RegisterAlgorithm reg(NAME, &create);
diff --git a/src/ipa/raspberrypi/controller/rpi/ccm.hpp b/src/ipa/rpi/controller/rpi/ccm.h
index 330ed51f..b3abeddf 100644
--- a/src/ipa/raspberrypi/controller/rpi/ccm.hpp
+++ b/src/ipa/rpi/controller/rpi/ccm.h
@@ -1,26 +1,26 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019, Raspberry Pi Ltd
*
- * ccm.hpp - CCM (colour correction matrix) control algorithm
+ * CCM (colour correction matrix) control algorithm
*/
#pragma once
#include <vector>
-#include "../ccm_algorithm.hpp"
-#include "../pwl.hpp"
+#include "../ccm_algorithm.h"
+#include "../pwl.h"
namespace RPiController {
-// Algorithm to calculate colour matrix. Should be placed after AWB.
+/* Algorithm to calculate colour matrix. Should be placed after AWB. */
struct Matrix {
Matrix(double m0, double m1, double m2, double m3, double m4, double m5,
double m6, double m7, double m8);
Matrix();
double m[3][3];
- void Read(boost::property_tree::ptree const &params);
+ int read(const libcamera::YamlObject &params);
};
static inline Matrix operator*(double d, Matrix const &m)
{
@@ -61,15 +61,15 @@ class Ccm : public CcmAlgorithm
{
public:
Ccm(Controller *controller = NULL);
- char const *Name() const override;
- void Read(boost::property_tree::ptree const &params) override;
- void SetSaturation(double saturation) override;
- void Initialise() override;
- void Prepare(Metadata *image_metadata) override;
+ char const *name() const override;
+ int read(const libcamera::YamlObject &params) override;
+ void setSaturation(double saturation) override;
+ void initialise() override;
+ void prepare(Metadata *imageMetadata) override;
private:
CcmConfig config_;
double saturation_;
};
-} // namespace RPiController
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/rpi/contrast.cpp b/src/ipa/rpi/controller/rpi/contrast.cpp
new file mode 100644
index 00000000..9eef792d
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/contrast.cpp
@@ -0,0 +1,192 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * contrast (gamma) control algorithm
+ */
+#include <stdint.h>
+
+#include <libcamera/base/log.h>
+
+#include "../contrast_status.h"
+#include "../histogram.h"
+
+#include "contrast.h"
+
+using namespace RPiController;
+using namespace libcamera;
+
+LOG_DEFINE_CATEGORY(RPiContrast)
+
+/*
+ * This algorithm adjusts the gamma curve supplied in the tuning according to
+ * the luminance histogram: if enabled, it pulls an empty lower end of the
+ * histogram down and pushes an empty upper end up, within the configured
+ * limits, and then applies any manually requested brightness and contrast
+ * adjustment on top.
+ */
+
+#define NAME "rpi.contrast"
+
+Contrast::Contrast(Controller *controller)
+ : ContrastAlgorithm(controller), brightness_(0.0), contrast_(1.0)
+{
+}
+
+char const *Contrast::name() const
+{
+ return NAME;
+}
+
+int Contrast::read(const libcamera::YamlObject &params)
+{
+ // enable adaptive enhancement by default
+ config_.ceEnable = params["ce_enable"].get<int>(1);
+ ceEnable_ = config_.ceEnable;
+ // the point near the bottom of the histogram to move
+ config_.loHistogram = params["lo_histogram"].get<double>(0.01);
+ // where in the range to try and move it to
+ config_.loLevel = params["lo_level"].get<double>(0.015);
+ // but don't move by more than this
+ config_.loMax = params["lo_max"].get<double>(500);
+ // equivalent values for the top of the histogram...
+ config_.hiHistogram = params["hi_histogram"].get<double>(0.95);
+ config_.hiLevel = params["hi_level"].get<double>(0.95);
+ config_.hiMax = params["hi_max"].get<double>(2000);
+ return config_.gammaCurve.read(params["gamma_curve"]);
+}
+
+void Contrast::setBrightness(double brightness)
+{
+ brightness_ = brightness;
+}
+
+void Contrast::setContrast(double contrast)
+{
+ contrast_ = contrast;
+}
+
+void Contrast::enableCe(bool enable)
+{
+ ceEnable_ = enable;
+}
+
+void Contrast::restoreCe()
+{
+ ceEnable_ = config_.ceEnable;
+}
+
+void Contrast::initialise()
+{
+ /*
+ * Fill in some default values as Prepare will run before Process gets
+ * called.
+ */
+ status_.brightness = brightness_;
+ status_.contrast = contrast_;
+ status_.gammaCurve = config_.gammaCurve;
+}
+
+void Contrast::prepare(Metadata *imageMetadata)
+{
+ imageMetadata->set("contrast.status", status_);
+}
+
+Pwl computeStretchCurve(Histogram const &histogram,
+ ContrastConfig const &config)
+{
+ Pwl enhance;
+ enhance.append(0, 0);
+ /*
+ * If the start of the histogram is rather empty, try to pull it down a
+ * bit.
+ */
+ double histLo = histogram.quantile(config.loHistogram) *
+ (65536 / histogram.bins());
+ double levelLo = config.loLevel * 65536;
+ LOG(RPiContrast, Debug)
+ << "Move histogram point " << histLo << " to " << levelLo;
+ histLo = std::max(levelLo,
+ std::min(65535.0, std::min(histLo, levelLo + config.loMax)));
+ LOG(RPiContrast, Debug)
+ << "Final values " << histLo << " -> " << levelLo;
+ enhance.append(histLo, levelLo);
+ /*
+ * Keep the mid-point (median) in the same place, though, to limit the
+ * apparent amount of global brightness shift.
+ */
+ double mid = histogram.quantile(0.5) * (65536 / histogram.bins());
+ enhance.append(mid, mid);
+
+ /*
+ * If the top to the histogram is empty, try to pull the pixel values
+ * there up.
+ */
+ double histHi = histogram.quantile(config.hiHistogram) *
+ (65536 / histogram.bins());
+ double levelHi = config.hiLevel * 65536;
+ LOG(RPiContrast, Debug)
+ << "Move histogram point " << histHi << " to " << levelHi;
+ histHi = std::min(levelHi,
+ std::max(0.0, std::max(histHi, levelHi - config.hiMax)));
+ LOG(RPiContrast, Debug)
+ << "Final values " << histHi << " -> " << levelHi;
+ enhance.append(histHi, levelHi);
+ enhance.append(65535, 65535);
+ return enhance;
+}
+
+Pwl applyManualContrast(Pwl const &gammaCurve, double brightness,
+ double contrast)
+{
+ Pwl newGammaCurve;
+ LOG(RPiContrast, Debug)
+ << "Manual brightness " << brightness << " contrast " << contrast;
+ gammaCurve.map([&](double x, double y) {
+ newGammaCurve.append(
+ x, std::max(0.0, std::min(65535.0,
+ (y - 32768) * contrast +
+ 32768 + brightness)));
+ });
+ return newGammaCurve;
+}
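The manual adjustment therefore remaps each point of the gamma curve around the 16-bit mid point: y' = clamp((y - 32768) * contrast + 32768 + brightness, 0, 65535). A small standalone sketch (not part of the patch) of that mapping with hypothetical brightness and contrast requests:

#include <algorithm>
#include <cassert>
#include <cmath>

/* The same point-wise mapping applyManualContrast() applies to the gamma curve. */
static double remap(double y, double brightness, double contrast)
{
	return std::clamp((y - 32768) * contrast + 32768 + brightness, 0.0, 65535.0);
}

int main()
{
	/* Hypothetical request: 20% more contrast, +1000 brightness. */
	double brightness = 1000, contrast = 1.2;

	/* Mid-grey only shifts by the brightness offset... */
	assert(std::abs(remap(32768, brightness, contrast) - 33768) < 1e-9);
	/* ...while darker values are pushed further down by the contrast gain. */
	assert(std::abs(remap(16384, brightness, contrast) - 14107.2) < 1e-6);
	/* Extremes are clamped to the 16-bit output range. */
	assert(remap(65535, brightness, contrast) == 65535.0);
	return 0;
}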
+
+void Contrast::process(StatisticsPtr &stats,
+ [[maybe_unused]] Metadata *imageMetadata)
+{
+ Histogram &histogram = stats->yHist;
+ /*
+ * We look at the histogram and adjust the gamma curve in the following
+ * ways: 1. Adjust the gamma curve so as to pull the start of the
+ * histogram down, and possibly push the end up.
+ */
+ Pwl gammaCurve = config_.gammaCurve;
+ if (ceEnable_) {
+ if (config_.loMax != 0 || config_.hiMax != 0)
+ gammaCurve = computeStretchCurve(histogram, config_).compose(gammaCurve);
+ /*
+ * We could apply other adjustments (e.g. partial equalisation)
+ * based on the histogram...?
+ */
+ }
+ /*
+ * 2. Finally apply any manually selected brightness/contrast
+ * adjustment.
+ */
+ if (brightness_ != 0 || contrast_ != 1.0)
+ gammaCurve = applyManualContrast(gammaCurve, brightness_, contrast_);
+ /*
+ * And fill in the status for output. Use more points towards the bottom
+ * of the curve.
+ */
+ status_.brightness = brightness_;
+ status_.contrast = contrast_;
+ status_.gammaCurve = std::move(gammaCurve);
+}
+
+/* Register algorithm with the system. */
+static Algorithm *create(Controller *controller)
+{
+ return (Algorithm *)new Contrast(controller);
+}
+static RegisterAlgorithm reg(NAME, &create);
diff --git a/src/ipa/rpi/controller/rpi/contrast.h b/src/ipa/rpi/controller/rpi/contrast.h
new file mode 100644
index 00000000..a9d9bbc9
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/contrast.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * contrast (gamma) control algorithm
+ */
+#pragma once
+
+#include <mutex>
+
+#include "../contrast_algorithm.h"
+#include "../pwl.h"
+
+namespace RPiController {
+
+/*
+ * Back End algorithm to apply contrast (gamma) adjustment. Should be placed
+ * after Back End AWB.
+ */
+
+struct ContrastConfig {
+ bool ceEnable;
+ double loHistogram;
+ double loLevel;
+ double loMax;
+ double hiHistogram;
+ double hiLevel;
+ double hiMax;
+ Pwl gammaCurve;
+};
+
+class Contrast : public ContrastAlgorithm
+{
+public:
+ Contrast(Controller *controller = NULL);
+ char const *name() const override;
+ int read(const libcamera::YamlObject &params) override;
+ void setBrightness(double brightness) override;
+ void setContrast(double contrast) override;
+ void enableCe(bool enable) override;
+ void restoreCe() override;
+ void initialise() override;
+ void prepare(Metadata *imageMetadata) override;
+ void process(StatisticsPtr &stats, Metadata *imageMetadata) override;
+
+private:
+ ContrastConfig config_;
+ double brightness_;
+ double contrast_;
+ ContrastStatus status_;
+ bool ceEnable_;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/rpi/denoise.cpp b/src/ipa/rpi/controller/rpi/denoise.cpp
new file mode 100644
index 00000000..ba851658
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/denoise.cpp
@@ -0,0 +1,198 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2022 Raspberry Pi Ltd
+ *
+ * Denoise (spatial, colour, temporal) control algorithm
+ */
+#include "denoise.h"
+
+#include <libcamera/base/log.h>
+
+#include "denoise_status.h"
+#include "noise_status.h"
+
+using namespace RPiController;
+using namespace libcamera;
+
+LOG_DEFINE_CATEGORY(RPiDenoise)
+
+// Calculate settings for the denoise blocks using the noise profile in
+// the image metadata.
+
+#define NAME "rpi.denoise"
+
+int DenoiseConfig::read(const libcamera::YamlObject &params)
+{
+ sdnEnable = params.contains("sdn");
+ if (sdnEnable) {
+ auto &sdnParams = params["sdn"];
+ sdnDeviation = sdnParams["deviation"].get<double>(3.2);
+ sdnStrength = sdnParams["strength"].get<double>(0.25);
+ sdnDeviation2 = sdnParams["deviation2"].get<double>(sdnDeviation);
+ sdnDeviationNoTdn = sdnParams["deviation_no_tdn"].get<double>(sdnDeviation);
+ sdnStrengthNoTdn = sdnParams["strength_no_tdn"].get<double>(sdnStrength);
+ sdnTdnBackoff = sdnParams["backoff"].get<double>(0.75);
+ }
+
+ cdnEnable = params.contains("cdn");
+ if (cdnEnable) {
+ auto &cdnParams = params["cdn"];
+ cdnDeviation = cdnParams["deviation"].get<double>(120);
+ cdnStrength = cdnParams["strength"].get<double>(0.2);
+ }
+
+ tdnEnable = params.contains("tdn");
+ if (tdnEnable) {
+ auto &tdnParams = params["tdn"];
+ tdnDeviation = tdnParams["deviation"].get<double>(0.5);
+ tdnThreshold = tdnParams["threshold"].get<double>(0.75);
+ } else if (sdnEnable) {
+ /*
+ * If SDN is enabled but TDN isn't, overwrite all the SDN settings
+ * with the "no TDN" versions. This makes it easier to enable or
+ * disable TDN in the tuning file without editing all the other
+ * parameters.
+ */
+ sdnDeviation = sdnDeviation2 = sdnDeviationNoTdn;
+ sdnStrength = sdnStrengthNoTdn;
+ }
+
+ return 0;
+}
+
+Denoise::Denoise(Controller *controller)
+ : DenoiseAlgorithm(controller), mode_(DenoiseMode::ColourHighQuality)
+{
+}
+
+char const *Denoise::name() const
+{
+ return NAME;
+}
+
+int Denoise::read(const libcamera::YamlObject &params)
+{
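+ /*
+ * If there is no "normal" key, treat the whole block as one flat
+ * configuration and register it as the "normal" config.
+ */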
+ if (!params.contains("normal")) {
+ configs_["normal"].read(params);
+ currentConfig_ = &configs_["normal"];
+
+ return 0;
+ }
+
+ for (const auto &[key, value] : params.asDict()) {
+ if (configs_[key].read(value)) {
+ LOG(RPiDenoise, Error) << "Failed to read denoise config " << key;
+ return -EINVAL;
+ }
+ }
+
+ auto it = configs_.find("normal");
+ if (it == configs_.end()) {
+ LOG(RPiDenoise, Error) << "No normal denoise settings found";
+ return -EINVAL;
+ }
+ currentConfig_ = &it->second;
+
+ return 0;
+}
+
+void Denoise::initialise()
+{
+}
+
+void Denoise::switchMode([[maybe_unused]] CameraMode const &cameraMode,
+ [[maybe_unused]] Metadata *metadata)
+{
+ /* A mode switch effectively resets temporal denoise and it has to start over. */
+ currentSdnDeviation_ = currentConfig_->sdnDeviationNoTdn;
+ currentSdnStrength_ = currentConfig_->sdnStrengthNoTdn;
+ currentSdnDeviation2_ = currentConfig_->sdnDeviationNoTdn;
+}
+
+void Denoise::prepare(Metadata *imageMetadata)
+{
+ struct NoiseStatus noiseStatus = {};
+ noiseStatus.noiseSlope = 3.0; // in case no metadata
+ if (imageMetadata->get("noise.status", noiseStatus) != 0)
+ LOG(RPiDenoise, Warning) << "no noise profile found";
+
+ LOG(RPiDenoise, Debug)
+ << "Noise profile: constant " << noiseStatus.noiseConstant
+ << " slope " << noiseStatus.noiseSlope;
+
+ if (mode_ == DenoiseMode::Off)
+ return;
+
+ if (currentConfig_->sdnEnable) {
+ struct SdnStatus sdn;
+ sdn.noiseConstant = noiseStatus.noiseConstant * currentSdnDeviation_;
+ sdn.noiseSlope = noiseStatus.noiseSlope * currentSdnDeviation_;
+ sdn.noiseConstant2 = noiseStatus.noiseConstant * currentConfig_->sdnDeviation2;
+ sdn.noiseSlope2 = noiseStatus.noiseSlope * currentSdnDeviation2_;
+ sdn.strength = currentSdnStrength_;
+ imageMetadata->set("sdn.status", sdn);
+ LOG(RPiDenoise, Debug)
+ << "const " << sdn.noiseConstant
+ << " slope " << sdn.noiseSlope
+ << " str " << sdn.strength
+ << " const2 " << sdn.noiseConstant2
+ << " slope2 " << sdn.noiseSlope2;
+
+ /* For the next frame, we back off the SDN parameters as TDN ramps up. */
+ double f = currentConfig_->sdnTdnBackoff;
+ currentSdnDeviation_ = f * currentSdnDeviation_ + (1 - f) * currentConfig_->sdnDeviation;
+ currentSdnStrength_ = f * currentSdnStrength_ + (1 - f) * currentConfig_->sdnStrength;
+ currentSdnDeviation2_ = f * currentSdnDeviation2_ + (1 - f) * currentConfig_->sdnDeviation2;
+ }
+
+ if (currentConfig_->tdnEnable) {
+ struct TdnStatus tdn;
+ tdn.noiseConstant = noiseStatus.noiseConstant * currentConfig_->tdnDeviation;
+ tdn.noiseSlope = noiseStatus.noiseSlope * currentConfig_->tdnDeviation;
+ tdn.threshold = currentConfig_->tdnThreshold;
+ imageMetadata->set("tdn.status", tdn);
+ LOG(RPiDenoise, Debug)
+ << "programmed tdn threshold " << tdn.threshold
+ << " constant " << tdn.noiseConstant
+ << " slope " << tdn.noiseSlope;
+ }
+
+ if (currentConfig_->cdnEnable && mode_ != DenoiseMode::ColourOff) {
+ struct CdnStatus cdn;
+ cdn.threshold = currentConfig_->cdnDeviation * noiseStatus.noiseSlope + noiseStatus.noiseConstant;
+ cdn.strength = currentConfig_->cdnStrength;
+ imageMetadata->set("cdn.status", cdn);
+ LOG(RPiDenoise, Debug)
+ << "programmed cdn threshold " << cdn.threshold
+ << " strength " << cdn.strength;
+ }
+}
+
+void Denoise::setMode(DenoiseMode mode)
+{
+ // We only distinguish between off and all other modes.
+ mode_ = mode;
+}
+
+void Denoise::setConfig(std::string const &name)
+{
+ auto it = configs_.find(name);
+ if (it == configs_.end()) {
+ /*
+ * Some platforms may have no need for different denoise settings, so we only issue
+ * a warning if there clearly are several configurations.
+ */
+ if (configs_.size() > 1)
+ LOG(RPiDenoise, Warning) << "No denoise config found for " << name;
+ else
+ LOG(RPiDenoise, Debug) << "No denoise config found for " << name;
+ } else
+ currentConfig_ = &it->second;
+}
+
+// Register algorithm with the system.
+static Algorithm *Create(Controller *controller)
+{
+ return (Algorithm *)new Denoise(controller);
+}
+static RegisterAlgorithm reg(NAME, &Create);
diff --git a/src/ipa/rpi/controller/rpi/denoise.h b/src/ipa/rpi/controller/rpi/denoise.h
new file mode 100644
index 00000000..92ff4f93
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/denoise.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2022, Raspberry Pi Ltd
+ *
+ * Denoise (spatial, colour, temporal) control algorithm
+ */
+#pragma once
+
+#include <map>
+#include <string>
+
+#include "algorithm.h"
+#include "denoise_algorithm.h"
+
+namespace RPiController {
+
+// Algorithm to calculate correct denoise settings.
+
+struct DenoiseConfig {
+ double sdnDeviation;
+ double sdnStrength;
+ double sdnDeviation2;
+ double sdnDeviationNoTdn;
+ double sdnStrengthNoTdn;
+ double sdnTdnBackoff;
+ double cdnDeviation;
+ double cdnStrength;
+ double tdnDeviation;
+ double tdnThreshold;
+ bool tdnEnable;
+ bool sdnEnable;
+ bool cdnEnable;
+ int read(const libcamera::YamlObject &params);
+};
+
+class Denoise : public DenoiseAlgorithm
+{
+public:
+ Denoise(Controller *controller);
+ char const *name() const override;
+ int read(const libcamera::YamlObject &params) override;
+ void initialise() override;
+ void switchMode(CameraMode const &cameraMode, Metadata *metadata) override;
+ void prepare(Metadata *imageMetadata) override;
+ void setMode(DenoiseMode mode) override;
+ void setConfig(std::string const &name) override;
+
+private:
+ std::map<std::string, DenoiseConfig> configs_;
+ DenoiseConfig *currentConfig_;
+ DenoiseMode mode_;
+
+ /* SDN parameters attenuate over time if TDN is running. */
+ double currentSdnDeviation_;
+ double currentSdnStrength_;
+ double currentSdnDeviation2_;
+};
+
+} // namespace RPiController
diff --git a/src/ipa/rpi/controller/rpi/dpc.cpp b/src/ipa/rpi/controller/rpi/dpc.cpp
new file mode 100644
index 00000000..8aac03f7
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/dpc.cpp
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * DPC (defective pixel correction) control algorithm
+ */
+
+#include <libcamera/base/log.h>
+
+#include "dpc.h"
+
+using namespace RPiController;
+using namespace libcamera;
+
+LOG_DEFINE_CATEGORY(RPiDpc)
+
+/*
+ * We use the lux status so that we can apply stronger settings in darkness (if
+ * necessary).
+ */
+
+#define NAME "rpi.dpc"
+
+Dpc::Dpc(Controller *controller)
+ : Algorithm(controller)
+{
+}
+
+char const *Dpc::name() const
+{
+ return NAME;
+}
+
+int Dpc::read(const libcamera::YamlObject &params)
+{
+ config_.strength = params["strength"].get<int>(1);
+ if (config_.strength < 0 || config_.strength > 2) {
+ LOG(RPiDpc, Error) << "Bad strength value";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void Dpc::prepare(Metadata *imageMetadata)
+{
+ DpcStatus dpcStatus = {};
+ /* Should we vary this with lux level or analogue gain? TBD. */
+ dpcStatus.strength = config_.strength;
+ LOG(RPiDpc, Debug) << "strength " << dpcStatus.strength;
+ imageMetadata->set("dpc.status", dpcStatus);
+}
+
+/* Register algorithm with the system. */
+static Algorithm *create(Controller *controller)
+{
+ return (Algorithm *)new Dpc(controller);
+}
+static RegisterAlgorithm reg(NAME, &create);
diff --git a/src/ipa/rpi/controller/rpi/dpc.h b/src/ipa/rpi/controller/rpi/dpc.h
new file mode 100644
index 00000000..9cefb06d
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/dpc.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * DPC (defective pixel correction) control algorithm
+ */
+#pragma once
+
+#include "../algorithm.h"
+#include "../dpc_status.h"
+
+namespace RPiController {
+
+/* Back End algorithm to apply appropriate DPC settings. */
+
+struct DpcConfig {
+ int strength;
+};
+
+class Dpc : public Algorithm
+{
+public:
+ Dpc(Controller *controller);
+ char const *name() const override;
+ int read(const libcamera::YamlObject &params) override;
+ void prepare(Metadata *imageMetadata) override;
+
+private:
+ DpcConfig config_;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/raspberrypi/controller/rpi/focus.hpp b/src/ipa/rpi/controller/rpi/focus.h
index 131b1d0f..ee014be9 100644
--- a/src/ipa/raspberrypi/controller/rpi/focus.hpp
+++ b/src/ipa/rpi/controller/rpi/focus.h
@@ -1,13 +1,13 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2020, Raspberry Pi Ltd
*
- * focus.hpp - focus algorithm
+ * focus algorithm
*/
#pragma once
-#include "../algorithm.hpp"
-#include "../metadata.hpp"
+#include "../algorithm.h"
+#include "../metadata.h"
/*
 * The "focus" algorithm. All it does is print out a version of the
@@ -21,8 +21,8 @@ class Focus : public Algorithm
{
public:
Focus(Controller *controller);
- char const *Name() const override;
- void Process(StatisticsPtr &stats, Metadata *image_metadata) override;
+ char const *name() const override;
+ void process(StatisticsPtr &stats, Metadata *imageMetadata) override;
};
} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/rpi/geq.cpp b/src/ipa/rpi/controller/rpi/geq.cpp
new file mode 100644
index 00000000..fb539d1f
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/geq.cpp
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * GEQ (green equalisation) control algorithm
+ */
+
+#include <libcamera/base/log.h>
+
+#include "../device_status.h"
+#include "../lux_status.h"
+#include "../pwl.h"
+
+#include "geq.h"
+
+using namespace RPiController;
+using namespace libcamera;
+
+LOG_DEFINE_CATEGORY(RPiGeq)
+
+/*
+ * We use the lux status so that we can apply stronger settings in darkness (if
+ * necessary).
+ */
+
+#define NAME "rpi.geq"
+
+Geq::Geq(Controller *controller)
+ : Algorithm(controller)
+{
+}
+
+char const *Geq::name() const
+{
+ return NAME;
+}
+
+int Geq::read(const libcamera::YamlObject &params)
+{
+ config_.offset = params["offset"].get<uint16_t>(0);
+ config_.slope = params["slope"].get<double>(0.0);
+ if (config_.slope < 0.0 || config_.slope >= 1.0) {
+ LOG(RPiGeq, Error) << "Bad slope value";
+ return -EINVAL;
+ }
+
+ if (params.contains("strength")) {
+ int ret = config_.strength.read(params["strength"]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void Geq::prepare(Metadata *imageMetadata)
+{
+ LuxStatus luxStatus = {};
+ luxStatus.lux = 400;
+ if (imageMetadata->get("lux.status", luxStatus))
+ LOG(RPiGeq, Warning) << "no lux data found";
+ DeviceStatus deviceStatus;
+ deviceStatus.analogueGain = 1.0; /* in case not found */
+ if (imageMetadata->get("device.status", deviceStatus))
+ LOG(RPiGeq, Warning)
+ << "no device metadata - use analogue gain of 1x";
+ GeqStatus geqStatus = {};
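+ /*
+ * Scale the configured offset and slope by a strength factor derived
+ * from the lux level (via the optional strength curve) and the analogue
+ * gain, then clamp the results to the valid output ranges.
+ */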
+ double strength = config_.strength.empty()
+ ? 1.0
+ : config_.strength.eval(config_.strength.domain().clip(luxStatus.lux));
+ strength *= deviceStatus.analogueGain;
+ double offset = config_.offset * strength;
+ double slope = config_.slope * strength;
+ geqStatus.offset = std::min(65535.0, std::max(0.0, offset));
+ geqStatus.slope = std::min(.99999, std::max(0.0, slope));
+ LOG(RPiGeq, Debug)
+ << "offset " << geqStatus.offset << " slope "
+ << geqStatus.slope << " (analogue gain "
+ << deviceStatus.analogueGain << " lux "
+ << luxStatus.lux << ")";
+ imageMetadata->set("geq.status", geqStatus);
+}
+
+/* Register algorithm with the system. */
+static Algorithm *create(Controller *controller)
+{
+ return (Algorithm *)new Geq(controller);
+}
+static RegisterAlgorithm reg(NAME, &create);
diff --git a/src/ipa/rpi/controller/rpi/geq.h b/src/ipa/rpi/controller/rpi/geq.h
new file mode 100644
index 00000000..2c8400c2
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/geq.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * GEQ (green equalisation) control algorithm
+ */
+#pragma once
+
+#include "../algorithm.h"
+#include "../geq_status.h"
+#include "../pwl.h"
+
+namespace RPiController {
+
+/* Back End algorithm to apply appropriate GEQ settings. */
+
+struct GeqConfig {
+ uint16_t offset;
+ double slope;
+ Pwl strength; /* lux to strength factor */
+};
+
+class Geq : public Algorithm
+{
+public:
+ Geq(Controller *controller);
+ char const *name() const override;
+ int read(const libcamera::YamlObject &params) override;
+ void prepare(Metadata *imageMetadata) override;
+
+private:
+ GeqConfig config_;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/rpi/hdr.cpp b/src/ipa/rpi/controller/rpi/hdr.cpp
new file mode 100644
index 00000000..34cf360e
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/hdr.cpp
@@ -0,0 +1,417 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2023 Raspberry Pi Ltd
+ *
+ * HDR control algorithm
+ */
+
+#include "hdr.h"
+
+#include <cmath>
+
+#include <libcamera/base/log.h>
+
+#include "../agc_status.h"
+#include "../alsc_status.h"
+#include "../stitch_status.h"
+#include "../tonemap_status.h"
+
+using namespace RPiController;
+using namespace libcamera;
+
+LOG_DEFINE_CATEGORY(RPiHdr)
+
+#define NAME "rpi.hdr"
+
+void HdrConfig::read(const libcamera::YamlObject &params, const std::string &modeName)
+{
+ name = modeName;
+
+ if (!params.contains("cadence"))
+ LOG(RPiHdr, Fatal) << "No cadence for HDR mode " << name;
+ cadence = params["cadence"].getList<unsigned int>().value();
+ if (cadence.empty())
+ LOG(RPiHdr, Fatal) << "Empty cadence in HDR mode " << name;
+
+ /*
+ * In the JSON file it's easier to use the channel name as the key, but
+ * for us it's convenient to swap them over.
+ */
+ for (const auto &[k, v] : params["channel_map"].asDict())
+ channelMap[v.get<unsigned int>().value()] = k;
+
+ /* Lens shading related parameters. */
+ if (params.contains("spatial_gain_curve")) {
+ spatialGainCurve.read(params["spatial_gain_curve"]);
+ } else if (params.contains("spatial_gain")) {
+ double spatialGain = params["spatial_gain"].get<double>(2.0);
+ spatialGainCurve.append(0.0, spatialGain);
+ spatialGainCurve.append(0.01, spatialGain);
+ spatialGainCurve.append(0.06, 1.0); /* maybe make this programmable? */
+ spatialGainCurve.append(1.0, 1.0);
+ }
+
+ diffusion = params["diffusion"].get<unsigned int>(3);
+ /* Clip to an arbitrary limit just to stop typos from killing the system! */
+ const unsigned int MAX_DIFFUSION = 15;
+ if (diffusion > MAX_DIFFUSION) {
+ diffusion = MAX_DIFFUSION;
+ LOG(RPiHdr, Warning) << "Diffusion value clipped to " << MAX_DIFFUSION;
+ }
+
+ /* Read any tonemap parameters. */
+ tonemapEnable = params["tonemap_enable"].get<int>(0);
+ detailConstant = params["detail_constant"].get<uint16_t>(0);
+ detailSlope = params["detail_slope"].get<double>(0.0);
+ iirStrength = params["iir_strength"].get<double>(8.0);
+ strength = params["strength"].get<double>(1.5);
+ if (tonemapEnable)
+ tonemap.read(params["tonemap"]);
+ speed = params["speed"].get<double>(1.0);
+ if (params.contains("hi_quantile_targets")) {
+ hiQuantileTargets = params["hi_quantile_targets"].getList<double>().value();
+ if (hiQuantileTargets.empty() || hiQuantileTargets.size() % 2)
+ LOG(RPiHdr, Fatal) << "hi_quantile_targets must be even and non-empty";
+ } else
+ hiQuantileTargets = { 0.95, 0.65, 0.5, 0.28, 0.3, 0.25 };
+ hiQuantileMaxGain = params["hi_quantile_max_gain"].get<double>(1.6);
+ if (params.contains("quantile_targets")) {
+ quantileTargets = params["quantile_targets"].getList<double>().value();
+ if (quantileTargets.empty() || quantileTargets.size() % 2)
+ LOG(RPiHdr, Fatal) << "quantile_targets must be even and non-empty";
+ } else
+ quantileTargets = { 0.2, 0.03, 1.0, 0.15 };
+ powerMin = params["power_min"].get<double>(0.65);
+ powerMax = params["power_max"].get<double>(1.0);
+ if (params.contains("contrast_adjustments")) {
+ contrastAdjustments = params["contrast_adjustments"].getList<double>().value();
+ } else
+ contrastAdjustments = { 0.5, 0.75 };
+
+ /* Read any stitch parameters. */
+ stitchEnable = params["stitch_enable"].get<int>(0);
+ thresholdLo = params["threshold_lo"].get<uint16_t>(50000);
+ motionThreshold = params["motion_threshold"].get<double>(0.005);
+ diffPower = params["diff_power"].get<uint8_t>(13);
+ if (diffPower > 15)
+ LOG(RPiHdr, Fatal) << "Bad diff_power value in HDR mode " << name;
+}
+
+Hdr::Hdr(Controller *controller)
+ : HdrAlgorithm(controller)
+{
+ regions_ = controller->getHardwareConfig().awbRegions;
+ numRegions_ = regions_.width * regions_.height;
+ gains_[0].resize(numRegions_, 1.0);
+ gains_[1].resize(numRegions_, 1.0);
+}
+
+char const *Hdr::name() const
+{
+ return NAME;
+}
+
+int Hdr::read(const libcamera::YamlObject &params)
+{
+ /* Make an "HDR off" mode by default so that tuning files don't have to. */
+ HdrConfig &offMode = config_["Off"];
+ offMode.name = "Off";
+ offMode.cadence = { 0 };
+ offMode.channelMap[0] = "None";
+ status_.mode = offMode.name;
+ delayedStatus_.mode = offMode.name;
+
+ /*
+ * But we still allow the tuning file to override the "Off" mode if it wants.
+ * For example, maybe an application will make channel 0 be the "short"
+ * channel, in order to apply other AGC controls to it.
+ */
+ for (const auto &[key, value] : params.asDict())
+ config_[key].read(value, key);
+
+ return 0;
+}
+
+int Hdr::setMode(std::string const &mode)
+{
+ /* Always validate the mode, so it can be used later without checking. */
+ auto it = config_.find(mode);
+ if (it == config_.end()) {
+ LOG(RPiHdr, Warning) << "No such HDR mode " << mode;
+ return -1;
+ }
+
+ status_.mode = it->second.name;
+
+ return 0;
+}
+
+std::vector<unsigned int> Hdr::getChannels() const
+{
+ return config_.at(status_.mode).cadence;
+}
+
+void Hdr::updateAgcStatus(Metadata *metadata)
+{
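+ /*
+ * Look up the HDR channel name for the AGC channel that produced this
+ * frame and embed the resulting HDR status in the agc.status metadata.
+ */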
+ std::scoped_lock lock(*metadata);
+ AgcStatus *agcStatus = metadata->getLocked<AgcStatus>("agc.status");
+ if (agcStatus) {
+ HdrConfig &hdrConfig = config_[status_.mode];
+ auto it = hdrConfig.channelMap.find(agcStatus->channel);
+ if (it != hdrConfig.channelMap.end()) {
+ status_.channel = it->second;
+ agcStatus->hdr = status_;
+ } else
+ LOG(RPiHdr, Warning) << "Channel " << agcStatus->channel
+ << " not found in mode " << status_.mode;
+ } else
+ LOG(RPiHdr, Warning) << "No agc.status found";
+}
+
+void Hdr::switchMode([[maybe_unused]] CameraMode const &cameraMode, Metadata *metadata)
+{
+ updateAgcStatus(metadata);
+ delayedStatus_ = status_;
+}
+
+void Hdr::prepare(Metadata *imageMetadata)
+{
+ AgcStatus agcStatus;
+ if (!imageMetadata->get<AgcStatus>("agc.delayed_status", agcStatus))
+ delayedStatus_ = agcStatus.hdr;
+
+ auto it = config_.find(delayedStatus_.mode);
+ if (it == config_.end()) {
+ /* Shouldn't be possible. There would be nothing we could do. */
+ LOG(RPiHdr, Warning) << "Unexpected HDR mode " << delayedStatus_.mode;
+ return;
+ }
+
+ HdrConfig &config = it->second;
+ if (config.spatialGainCurve.empty())
+ return;
+
+ AlscStatus alscStatus{}; /* some compilers seem to require the braces */
+ if (imageMetadata->get<AlscStatus>("alsc.status", alscStatus)) {
+ LOG(RPiHdr, Warning) << "No ALSC status";
+ return;
+ }
+
+ /* The final gains ended up in the odd or even array, according to diffusion. */
+ std::vector<double> &gains = gains_[config.diffusion & 1];
+ for (unsigned int i = 0; i < numRegions_; i++) {
+ alscStatus.r[i] *= gains[i];
+ alscStatus.g[i] *= gains[i];
+ alscStatus.b[i] *= gains[i];
+ }
+ imageMetadata->set("alsc.status", alscStatus);
+}
+
+bool Hdr::updateTonemap([[maybe_unused]] StatisticsPtr &stats, HdrConfig &config)
+{
+ /* When there's a change of HDR mode we start over with a new tonemap curve. */
+ if (delayedStatus_.mode != previousMode_) {
+ previousMode_ = delayedStatus_.mode;
+ tonemap_ = Pwl();
+ }
+
+ /* No tonemapping. No need to output a tonemap.status. */
+ if (!config.tonemapEnable)
+ return false;
+
+ /* If an explicit tonemap was given, use it. */
+ if (!config.tonemap.empty()) {
+ tonemap_ = config.tonemap;
+ return true;
+ }
+
+ /*
+ * We don't update the tonemap on short frames when in multi-exposure mode, but
+ * we still need to output the most recent tonemap. Possibly the config should
+ * indicate the channels for which the tonemap gets updated?
+ */
+ if (delayedStatus_.mode == "MultiExposure" && delayedStatus_.channel != "short")
+ return true;
+
+ /*
+ * Create a tonemap dynamically. We have three ingredients.
+ *
+ * 1. We have a list of "hi quantiles" and "targets". We use these to judge if
+ * the image does seem to be reasonably saturated. If it isn't, we calculate
+ * a gain that we will feed as a linear factor into the tonemap generation.
+ * This prevents unsaturated images from becoming quite so "flat".
+ *
+ * 2. We have a list of quantile/target pairs for the bottom of the histogram.
+ * We use these to calculate how much gain we must apply to the bottom of the
+ * tonemap. We apply this gain as a power curve so as not to blow out the top
+ * end.
+ *
+ * 3. Finally, when we generate the tonemap, we have some contrast adjustments
+ * for the bottom because we know that power curves can start quite steeply and
+ * cause a washed-out look.
+ */
+
+ /* Compute the linear gain from the headroom for saturation at the top. */
+ double gain = 10; /* arbitrary, but hiQuantileMaxGain will clamp it later */
+ for (unsigned int i = 0; i < config.hiQuantileTargets.size(); i += 2) {
+ double quantile = config.hiQuantileTargets[i];
+ double target = config.hiQuantileTargets[i + 1];
+ double value = stats->yHist.interQuantileMean(quantile, 1.0) / 1024.0;
+ double newGain = target / (value + 0.01);
+ gain = std::min(gain, newGain);
+ }
+ gain = std::clamp(gain, 1.0, config.hiQuantileMaxGain);
+
+ /* Compute the power curve from the amount of gain needed at the bottom. */
+ double minPower = 2; /* arbitrary, but config.powerMax will clamp it later */
+ for (unsigned int i = 0; i < config.quantileTargets.size(); i += 2) {
+ double quantile = config.quantileTargets[i];
+ double target = config.quantileTargets[i + 1];
+ double value = stats->yHist.interQuantileMean(0, quantile) / 1024.0;
+ value = std::min(value * gain, 1.0);
+ double power = log(target + 1e-6) / log(value + 1e-6);
+ minPower = std::min(minPower, power);
+ }
+ double power = std::clamp(minPower, config.powerMin, config.powerMax);
+
+ /* Generate the tonemap, including the contrast adjustment factors. */
+ Pwl tonemap;
+ tonemap.append(0, 0);
+ for (unsigned int i = 0; i <= 6; i++) {
+ double x = 1 << (i + 9); /* x loops from 512 to 32768 inclusive */
+ double y = pow(std::min(x * gain, 65535.0) / 65536.0, power) * 65536;
+ if (i < config.contrastAdjustments.size())
+ y *= config.contrastAdjustments[i];
+ if (!tonemap_.empty())
+ y = y * config.speed + tonemap_.eval(x) * (1 - config.speed);
+ tonemap.append(x, y);
+ }
+ tonemap.append(65535, 65535);
+ tonemap_ = tonemap;
+
+ return true;
+}
+
+static void averageGains(std::vector<double> &src, std::vector<double> &dst, const Size &size)
+{
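+ /*
+ * Diffuse the gains spatially: each output cell becomes the average of
+ * itself and its in-bounds neighbours (3 values at the corners, 4 along
+ * the edges and 5 in the interior).
+ */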
+#define IDX(y, x) ((y)*size.width + (x))
+ unsigned int lastCol = size.width - 1; /* index of last column */
+ unsigned int preLastCol = lastCol - 1; /* and the column before that */
+ unsigned int lastRow = size.height - 1; /* index of last row */
+ unsigned int preLastRow = lastRow - 1; /* and the row before that */
+
+ /* Corners first. */
+ dst[IDX(0, 0)] = (src[IDX(0, 0)] + src[IDX(0, 1)] + src[IDX(1, 0)]) / 3;
+ dst[IDX(0, lastCol)] = (src[IDX(0, lastCol)] + src[IDX(0, preLastCol)] + src[IDX(1, lastCol)]) / 3;
+ dst[IDX(lastRow, 0)] = (src[IDX(lastRow, 0)] + src[IDX(lastRow, 1)] + src[IDX(preLastRow, 0)]) / 3;
+ dst[IDX(lastRow, lastCol)] = (src[IDX(lastRow, lastCol)] + src[IDX(lastRow, preLastCol)] +
+ src[IDX(preLastRow, lastCol)]) /
+ 3;
+
+ /* Now the edges. */
+ for (unsigned int i = 1; i < lastCol; i++) {
+ dst[IDX(0, i)] = (src[IDX(0, i - 1)] + src[IDX(0, i)] + src[IDX(0, i + 1)] + src[IDX(1, i)]) / 4;
+ dst[IDX(lastRow, i)] = (src[IDX(lastRow, i - 1)] + src[IDX(lastRow, i)] +
+ src[IDX(lastRow, i + 1)] + src[IDX(preLastRow, i)]) /
+ 4;
+ }
+
+ for (unsigned int i = 1; i < lastRow; i++) {
+ dst[IDX(i, 0)] = (src[IDX(i - 1, 0)] + src[IDX(i, 0)] + src[IDX(i + 1, 0)] + src[IDX(i, 1)]) / 4;
+ dst[IDX(i, lastCol)] = (src[IDX(i - 1, lastCol)] + src[IDX(i, lastCol)] +
+ src[IDX(i + 1, lastCol)] + src[IDX(i, preLastCol)]) /
+ 4;
+ }
+
+ /* Finally the interior. */
+ for (unsigned int j = 1; j < lastRow; j++) {
+ for (unsigned int i = 1; i < lastCol; i++) {
+ dst[IDX(j, i)] = (src[IDX(j - 1, i)] + src[IDX(j, i - 1)] + src[IDX(j, i)] +
+ src[IDX(j, i + 1)] + src[IDX(j + 1, i)]) /
+ 5;
+ }
+ }
+}
+
+void Hdr::updateGains(StatisticsPtr &stats, HdrConfig &config)
+{
+ if (config.spatialGainCurve.empty())
+ return;
+
+ /* When alternating exposures, only compute these gains for the short frame. */
+ if (delayedStatus_.mode == "MultiExposure" && delayedStatus_.channel != "short")
+ return;
+
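+ /*
+ * Evaluate the spatial gain curve against each region's brightness,
+ * taken as the largest of the mean R, G and B values normalised to 0-1.
+ */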
+ for (unsigned int i = 0; i < numRegions_; i++) {
+ auto &region = stats->awbRegions.get(i);
+ unsigned int counted = region.counted;
+ counted += (counted == 0); /* avoid div by zero */
+ double r = region.val.rSum / counted;
+ double g = region.val.gSum / counted;
+ double b = region.val.bSum / counted;
+ double brightness = std::max({ r, g, b }) / 65535;
+ gains_[0][i] = config.spatialGainCurve.eval(brightness);
+ }
+
+ /* Ping-pong between the two gains_ buffers. */
+ for (unsigned int i = 0; i < config.diffusion; i++)
+ averageGains(gains_[i & 1], gains_[(i & 1) ^ 1], regions_);
+}
+
+void Hdr::process(StatisticsPtr &stats, Metadata *imageMetadata)
+{
+ /* Note what HDR channel this frame will be once it comes back to us. */
+ updateAgcStatus(imageMetadata);
+
+ /*
+ * Now figure out what HDR channel this frame is. It should be available in the
+ * agc.delayed_status, unless this is an early frame after a mode switch, in which
+ * case delayedStatus_ should be right.
+ */
+ AgcStatus agcStatus;
+ if (!imageMetadata->get<AgcStatus>("agc.delayed_status", agcStatus))
+ delayedStatus_ = agcStatus.hdr;
+
+ auto it = config_.find(delayedStatus_.mode);
+ if (it == config_.end()) {
+ /* Shouldn't be possible. There would be nothing we could do. */
+ LOG(RPiHdr, Warning) << "Unexpected HDR mode " << delayedStatus_.mode;
+ return;
+ }
+
+ HdrConfig &config = it->second;
+
+ /* Update the spatially varying gains. They get written in prepare(). */
+ updateGains(stats, config);
+
+ if (updateTonemap(stats, config)) {
+ /* Add tonemap.status metadata. */
+ TonemapStatus tonemapStatus;
+
+ tonemapStatus.detailConstant = config.detailConstant;
+ tonemapStatus.detailSlope = config.detailSlope;
+ tonemapStatus.iirStrength = config.iirStrength;
+ tonemapStatus.strength = config.strength;
+ tonemapStatus.tonemap = tonemap_;
+
+ imageMetadata->set("tonemap.status", tonemapStatus);
+ }
+
+ if (config.stitchEnable) {
+ /* Add stitch.status metadata. */
+ StitchStatus stitchStatus;
+
+ stitchStatus.diffPower = config.diffPower;
+ stitchStatus.motionThreshold = config.motionThreshold;
+ stitchStatus.thresholdLo = config.thresholdLo;
+
+ imageMetadata->set("stitch.status", stitchStatus);
+ }
+}
+
+/* Register algorithm with the system. */
+static Algorithm *create(Controller *controller)
+{
+ return (Algorithm *)new Hdr(controller);
+}
+static RegisterAlgorithm reg(NAME, &create);
diff --git a/src/ipa/rpi/controller/rpi/hdr.h b/src/ipa/rpi/controller/rpi/hdr.h
new file mode 100644
index 00000000..9b7327f8
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/hdr.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2023, Raspberry Pi Ltd
+ *
+ * HDR control algorithm
+ */
+#pragma once
+
+#include <map>
+#include <string>
+#include <vector>
+
+#include <libcamera/geometry.h>
+
+#include "../hdr_algorithm.h"
+#include "../hdr_status.h"
+#include "../pwl.h"
+
+/* This is our implementation of an HDR algorithm. */
+
+namespace RPiController {
+
+struct HdrConfig {
+ std::string name;
+ std::vector<unsigned int> cadence;
+ std::map<unsigned int, std::string> channelMap;
+
+ /* Lens shading related parameters. */
+ Pwl spatialGainCurve; /* Brightness to gain curve for different image regions. */
+ unsigned int diffusion; /* How much to diffuse the gain spatially. */
+
+ /* Tonemap related parameters. */
+ bool tonemapEnable;
+ uint16_t detailConstant;
+ double detailSlope;
+ double iirStrength;
+ double strength;
+ Pwl tonemap;
+ /* These relate to adaptive tonemap calculation. */
+ double speed;
+ std::vector<double> hiQuantileTargets; /* quantiles to check for unsaturated images */
+ double hiQuantileMaxGain; /* the max gain we'll apply when unsaturated */
+ std::vector<double> quantileTargets; /* target values for histogram quantiles */
+ double powerMin; /* minimum tonemap power */
+ double powerMax; /* maximum tonemap power */
+ std::vector<double> contrastAdjustments; /* any contrast adjustment factors */
+
+ /* Stitch related parameters. */
+ bool stitchEnable;
+ uint16_t thresholdLo;
+ uint8_t diffPower;
+ double motionThreshold;
+
+ void read(const libcamera::YamlObject &params, const std::string &name);
+};
+
+class Hdr : public HdrAlgorithm
+{
+public:
+ Hdr(Controller *controller);
+ char const *name() const override;
+ void switchMode(CameraMode const &cameraMode, Metadata *metadata) override;
+ int read(const libcamera::YamlObject &params) override;
+ void prepare(Metadata *imageMetadata) override;
+ void process(StatisticsPtr &stats, Metadata *imageMetadata) override;
+ int setMode(std::string const &mode) override;
+ std::vector<unsigned int> getChannels() const override;
+
+private:
+ void updateAgcStatus(Metadata *metadata);
+ void updateGains(StatisticsPtr &stats, HdrConfig &config);
+ bool updateTonemap(StatisticsPtr &stats, HdrConfig &config);
+
+ std::map<std::string, HdrConfig> config_;
+ HdrStatus status_; /* track the current HDR mode and channel */
+ HdrStatus delayedStatus_; /* track the delayed HDR mode and channel */
+ std::string previousMode_;
+ Pwl tonemap_;
+ libcamera::Size regions_; /* stats regions */
+ unsigned int numRegions_; /* total number of stats regions */
+ std::vector<double> gains_[2];
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/rpi/lux.cpp b/src/ipa/rpi/controller/rpi/lux.cpp
new file mode 100644
index 00000000..7b31faab
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/lux.cpp
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * Lux control algorithm
+ */
+#include <math.h>
+
+#include <libcamera/base/log.h>
+
+#include "../device_status.h"
+
+#include "lux.h"
+
+using namespace RPiController;
+using namespace libcamera;
+using namespace std::literals::chrono_literals;
+
+LOG_DEFINE_CATEGORY(RPiLux)
+
+#define NAME "rpi.lux"
+
+Lux::Lux(Controller *controller)
+ : Algorithm(controller)
+{
+ /*
+ * Put in some defaults as there will be no meaningful values until
+ * process() has run.
+ */
+ status_.aperture = 1.0;
+ status_.lux = 400;
+}
+
+char const *Lux::name() const
+{
+ return NAME;
+}
+
+int Lux::read(const libcamera::YamlObject &params)
+{
+ auto value = params["reference_shutter_speed"].get<double>();
+ if (!value)
+ return -EINVAL;
+ referenceShutterSpeed_ = *value * 1.0us;
+
+ value = params["reference_gain"].get<double>();
+ if (!value)
+ return -EINVAL;
+ referenceGain_ = *value;
+
+ referenceAperture_ = params["reference_aperture"].get<double>(1.0);
+
+ value = params["reference_Y"].get<double>();
+ if (!value)
+ return -EINVAL;
+ referenceY_ = *value;
+
+ value = params["reference_lux"].get<double>();
+ if (!value)
+ return -EINVAL;
+ referenceLux_ = *value;
+
+ currentAperture_ = referenceAperture_;
+ return 0;
+}
+
+void Lux::setCurrentAperture(double aperture)
+{
+ currentAperture_ = aperture;
+}
+
+void Lux::prepare(Metadata *imageMetadata)
+{
+ std::unique_lock<std::mutex> lock(mutex_);
+ imageMetadata->set("lux.status", status_);
+}
+
+void Lux::process(StatisticsPtr &stats, Metadata *imageMetadata)
+{
+ DeviceStatus deviceStatus;
+ if (imageMetadata->get("device.status", deviceStatus) == 0) {
+ double currentGain = deviceStatus.analogueGain;
+ double currentAperture = deviceStatus.aperture.value_or(currentAperture_);
+ double currentY = stats->yHist.interQuantileMean(0, 1);
+ double gainRatio = referenceGain_ / currentGain;
+ double shutterSpeedRatio =
+ referenceShutterSpeed_ / deviceStatus.shutterSpeed;
+ double apertureRatio = referenceAperture_ / currentAperture;
+ double yRatio = currentY * (65536 / stats->yHist.bins()) / referenceY_;
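+ /*
+ * Scale the reference lux by the measured Y ratio, compensating for any
+ * difference in shutter speed, gain and aperture from the reference
+ * image; the aperture ratio enters squared as it is in units of 1/f.
+ */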
+ double estimatedLux = shutterSpeedRatio * gainRatio *
+ apertureRatio * apertureRatio *
+ yRatio * referenceLux_;
+ LuxStatus status;
+ status.lux = estimatedLux;
+ status.aperture = currentAperture;
+ LOG(RPiLux, Debug) << ": estimated lux " << estimatedLux;
+ {
+ std::unique_lock<std::mutex> lock(mutex_);
+ status_ = status;
+ }
+ /*
+ * Overwrite the metadata here as well, so that downstream
+ * algorithms get the latest value.
+ */
+ imageMetadata->set("lux.status", status);
+ } else
+ LOG(RPiLux, Warning) << ": no device metadata";
+}
+
+/* Register algorithm with the system. */
+static Algorithm *create(Controller *controller)
+{
+ return (Algorithm *)new Lux(controller);
+}
+static RegisterAlgorithm reg(NAME, &create);
diff --git a/src/ipa/rpi/controller/rpi/lux.h b/src/ipa/rpi/controller/rpi/lux.h
new file mode 100644
index 00000000..89f441fc
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/lux.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * Lux control algorithm
+ */
+#pragma once
+
+#include <mutex>
+
+#include <libcamera/base/utils.h>
+
+#include "../lux_status.h"
+#include "../algorithm.h"
+
+/* This is our implementation of the "lux control algorithm". */
+
+namespace RPiController {
+
+class Lux : public Algorithm
+{
+public:
+ Lux(Controller *controller);
+ char const *name() const override;
+ int read(const libcamera::YamlObject &params) override;
+ void prepare(Metadata *imageMetadata) override;
+ void process(StatisticsPtr &stats, Metadata *imageMetadata) override;
+ void setCurrentAperture(double aperture);
+
+private:
+ /*
+ * These values define the conditions of the reference image, against
+ * which we compare the new image.
+ */
+ libcamera::utils::Duration referenceShutterSpeed_;
+ double referenceGain_;
+ double referenceAperture_; /* units of 1/f */
+ double referenceY_; /* out of 65536 */
+ double referenceLux_;
+ double currentAperture_;
+ LuxStatus status_;
+ std::mutex mutex_;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/rpi/noise.cpp b/src/ipa/rpi/controller/rpi/noise.cpp
new file mode 100644
index 00000000..3f1c62cf
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/noise.cpp
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * Noise control algorithm
+ */
+
+#include <math.h>
+
+#include <libcamera/base/log.h>
+
+#include "../device_status.h"
+#include "../noise_status.h"
+
+#include "noise.h"
+
+using namespace RPiController;
+using namespace libcamera;
+
+LOG_DEFINE_CATEGORY(RPiNoise)
+
+#define NAME "rpi.noise"
+
+Noise::Noise(Controller *controller)
+ : Algorithm(controller), modeFactor_(1.0)
+{
+}
+
+char const *Noise::name() const
+{
+ return NAME;
+}
+
+void Noise::switchMode(CameraMode const &cameraMode,
+ [[maybe_unused]] Metadata *metadata)
+{
+ /*
+ * For example, we would expect a 2x2 binned mode to have a "noise
+ * factor" of sqrt(2x2) = 2. (can't be less than one, right?)
+ */
+ modeFactor_ = std::max(1.0, cameraMode.noiseFactor);
+}
+
+int Noise::read(const libcamera::YamlObject &params)
+{
+ auto value = params["reference_constant"].get<double>();
+ if (!value)
+ return -EINVAL;
+ referenceConstant_ = *value;
+
+ value = params["reference_slope"].get<double>();
+ if (!value)
+ return -EINVAL;
+ referenceSlope_ = *value;
+
+ return 0;
+}
+
+void Noise::prepare(Metadata *imageMetadata)
+{
+ struct DeviceStatus deviceStatus;
+ deviceStatus.analogueGain = 1.0; /* keep compiler calm */
+ if (imageMetadata->get("device.status", deviceStatus) == 0) {
+ /*
+ * There is a slight question as to exactly how the noise
+ * profile, specifically the constant part of it, scales. For
+ * now we assume it all scales the same, and we'll revisit this
+ * if it proves substantially wrong. NOTE: we may also want to
+ * make some adjustments based on the camera mode (such as
+ * binning), if we knew how to discover it...
+ */
+ double factor = sqrt(deviceStatus.analogueGain) / modeFactor_;
+ struct NoiseStatus status;
+ status.noiseConstant = referenceConstant_ * factor;
+ status.noiseSlope = referenceSlope_ * factor;
+ imageMetadata->set("noise.status", status);
+ LOG(RPiNoise, Debug)
+ << "constant " << status.noiseConstant
+ << " slope " << status.noiseSlope;
+ } else
+ LOG(RPiNoise, Warning) << " no metadata";
+}
+
+/* Register algorithm with the system. */
+static Algorithm *create(Controller *controller)
+{
+ return new Noise(controller);
+}
+static RegisterAlgorithm reg(NAME, &create);
diff --git a/src/ipa/rpi/controller/rpi/noise.h b/src/ipa/rpi/controller/rpi/noise.h
new file mode 100644
index 00000000..6deae1f0
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/noise.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * Noise control algorithm
+ */
+#pragma once
+
+#include "../algorithm.h"
+#include "../noise_status.h"
+
+/* This is our implementation of the "noise algorithm". */
+
+namespace RPiController {
+
+class Noise : public Algorithm
+{
+public:
+ Noise(Controller *controller);
+ char const *name() const override;
+ void switchMode(CameraMode const &cameraMode, Metadata *metadata) override;
+ int read(const libcamera::YamlObject &params) override;
+ void prepare(Metadata *imageMetadata) override;
+
+private:
+ /* the noise profile for analogue gain of 1.0 */
+ double referenceConstant_;
+ double referenceSlope_;
+ double modeFactor_;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/rpi/saturation.cpp b/src/ipa/rpi/controller/rpi/saturation.cpp
new file mode 100644
index 00000000..b83c5887
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/saturation.cpp
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2022 Raspberry Pi Ltd
+ *
+ * Saturation control algorithm
+ */
+#include "saturation.h"
+
+#include <libcamera/base/log.h>
+
+#include "saturation_status.h"
+
+using namespace RPiController;
+using namespace libcamera;
+
+LOG_DEFINE_CATEGORY(RPiSaturation)
+
+#define NAME "rpi.saturation"
+
+Saturation::Saturation(Controller *controller)
+ : Algorithm(controller)
+{
+}
+
+char const *Saturation::name() const
+{
+ return NAME;
+}
+
+int Saturation::read(const libcamera::YamlObject &params)
+{
+ config_.shiftR = params["shift_r"].get<uint8_t>(0);
+ config_.shiftG = params["shift_g"].get<uint8_t>(0);
+ config_.shiftB = params["shift_b"].get<uint8_t>(0);
+ return 0;
+}
+
+void Saturation::initialise()
+{
+}
+
+void Saturation::prepare(Metadata *imageMetadata)
+{
+ SaturationStatus saturation;
+
+ saturation.shiftR = config_.shiftR;
+ saturation.shiftG = config_.shiftG;
+ saturation.shiftB = config_.shiftB;
+ imageMetadata->set("saturation.status", saturation);
+}
+
+// Register algorithm with the system.
+static Algorithm *Create(Controller *controller)
+{
+ return (Algorithm *)new Saturation(controller);
+}
+static RegisterAlgorithm reg(NAME, &Create);
diff --git a/src/ipa/rpi/controller/rpi/saturation.h b/src/ipa/rpi/controller/rpi/saturation.h
new file mode 100644
index 00000000..97da412a
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/saturation.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2022, Raspberry Pi Ltd
+ *
+ * Saturation control algorithm
+ */
+#pragma once
+
+#include "algorithm.h"
+
+namespace RPiController {
+
+struct SaturationConfig {
+ uint8_t shiftR;
+ uint8_t shiftG;
+ uint8_t shiftB;
+};
+
+class Saturation : public Algorithm
+{
+public:
+ Saturation(Controller *controller = NULL);
+ char const *name() const override;
+ int read(const libcamera::YamlObject &params) override;
+ void initialise() override;
+ void prepare(Metadata *imageMetadata) override;
+
+private:
+ SaturationConfig config_;
+};
+
+} // namespace RPiController
diff --git a/src/ipa/rpi/controller/rpi/sdn.cpp b/src/ipa/rpi/controller/rpi/sdn.cpp
new file mode 100644
index 00000000..619178a8
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/sdn.cpp
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019-2021, Raspberry Pi Ltd
+ *
+ * SDN (spatial denoise) control algorithm
+ */
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include "../denoise_status.h"
+#include "../noise_status.h"
+
+#include "sdn.h"
+
+using namespace RPiController;
+using namespace libcamera;
+
+LOG_DEFINE_CATEGORY(RPiSdn)
+
+/*
+ * Calculate settings for the spatial denoise block using the noise profile in
+ * the image metadata.
+ */
+
+#define NAME "rpi.sdn"
+
+Sdn::Sdn(Controller *controller)
+ : DenoiseAlgorithm(controller), mode_(DenoiseMode::ColourOff)
+{
+}
+
+char const *Sdn::name() const
+{
+ return NAME;
+}
+
+int Sdn::read(const libcamera::YamlObject &params)
+{
+ LOG(RPiSdn, Warning)
+ << "Using legacy SDN tuning - please consider moving SDN inside rpi.denoise";
+ deviation_ = params["deviation"].get<double>(3.2);
+ strength_ = params["strength"].get<double>(0.75);
+ return 0;
+}
+
+void Sdn::initialise()
+{
+}
+
+void Sdn::prepare(Metadata *imageMetadata)
+{
+ struct NoiseStatus noiseStatus = {};
+ noiseStatus.noiseSlope = 3.0; /* in case no metadata */
+ if (imageMetadata->get("noise.status", noiseStatus) != 0)
+ LOG(RPiSdn, Warning) << "no noise profile found";
+ LOG(RPiSdn, Debug)
+ << "Noise profile: constant " << noiseStatus.noiseConstant
+ << " slope " << noiseStatus.noiseSlope;
+ struct DenoiseStatus status;
+ status.noiseConstant = noiseStatus.noiseConstant * deviation_;
+ status.noiseSlope = noiseStatus.noiseSlope * deviation_;
+ status.strength = strength_;
+ status.mode = utils::to_underlying(mode_);
+ imageMetadata->set("denoise.status", status);
+ LOG(RPiSdn, Debug)
+ << "programmed constant " << status.noiseConstant
+ << " slope " << status.noiseSlope
+ << " strength " << status.strength;
+}
+
+void Sdn::setMode(DenoiseMode mode)
+{
+ /* We only distinguish between off and all other modes. */
+ mode_ = mode;
+}
+
+/* Register algorithm with the system. */
+static Algorithm *create(Controller *controller)
+{
+ return (Algorithm *)new Sdn(controller);
+}
+static RegisterAlgorithm reg(NAME, &create);
diff --git a/src/ipa/rpi/controller/rpi/sdn.h b/src/ipa/rpi/controller/rpi/sdn.h
new file mode 100644
index 00000000..cb226de8
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/sdn.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * SDN (spatial denoise) control algorithm
+ */
+#pragma once
+
+#include "../algorithm.h"
+#include "../denoise_algorithm.h"
+
+namespace RPiController {
+
+/* Algorithm to calculate correct spatial denoise (SDN) settings. */
+
+class Sdn : public DenoiseAlgorithm
+{
+public:
+ Sdn(Controller *controller = NULL);
+ char const *name() const override;
+ int read(const libcamera::YamlObject &params) override;
+ void initialise() override;
+ void prepare(Metadata *imageMetadata) override;
+ void setMode(DenoiseMode mode) override;
+
+private:
+ double deviation_;
+ double strength_;
+ DenoiseMode mode_;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/rpi/sharpen.cpp b/src/ipa/rpi/controller/rpi/sharpen.cpp
new file mode 100644
index 00000000..39537f4a
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/sharpen.cpp
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * sharpening control algorithm
+ */
+
+#include <math.h>
+
+#include <libcamera/base/log.h>
+
+#include "../sharpen_status.h"
+
+#include "sharpen.h"
+
+using namespace RPiController;
+using namespace libcamera;
+
+LOG_DEFINE_CATEGORY(RPiSharpen)
+
+#define NAME "rpi.sharpen"
+
+Sharpen::Sharpen(Controller *controller)
+ : SharpenAlgorithm(controller), userStrength_(1.0)
+{
+}
+
+char const *Sharpen::name() const
+{
+ return NAME;
+}
+
+void Sharpen::switchMode(CameraMode const &cameraMode,
+ [[maybe_unused]] Metadata *metadata)
+{
+ /* can't be less than one, right? */
+ modeFactor_ = std::max(1.0, cameraMode.noiseFactor);
+}
+
+int Sharpen::read(const libcamera::YamlObject &params)
+{
+ threshold_ = params["threshold"].get<double>(1.0);
+ strength_ = params["strength"].get<double>(1.0);
+ limit_ = params["limit"].get<double>(1.0);
+ LOG(RPiSharpen, Debug)
+ << "Read threshold " << threshold_
+ << " strength " << strength_
+ << " limit " << limit_;
+ return 0;
+}
+
+void Sharpen::setStrength(double strength)
+{
+ /*
+ * Note that this function is how an application sets the overall
+ * sharpening "strength". We call this the "user strength" field
+ * as there already is a strength_ field - being an internal gain
+ * parameter that gets passed to the ISP control code. Negative
+ * values are not allowed - coerce them to zero (no sharpening).
+ */
+ userStrength_ = std::max(0.0, strength);
+}
+
+void Sharpen::prepare(Metadata *imageMetadata)
+{
+ /*
+ * The userStrength_ affects the algorithm's internal gain directly, but
+ * we adjust the limit and threshold less aggressively. Using a sqrt
+ * function is an arbitrary but gentle way of accomplishing this.
+ */
+ double userStrengthSqrt = sqrt(userStrength_);
+ struct SharpenStatus status;
+ /*
+ * Binned modes seem to need the sharpening toned down with this
+ * pipeline, thus we use the modeFactor_ here. Also avoid
+ * divide-by-zero with the userStrengthSqrt.
+ */
+ status.threshold = threshold_ * modeFactor_ /
+ std::max(0.01, userStrengthSqrt);
+ status.strength = strength_ / modeFactor_ * userStrength_;
+ status.limit = limit_ / modeFactor_ * userStrengthSqrt;
+ /* Finally, report any application-supplied parameters that were used. */
+ status.userStrength = userStrength_;
+ imageMetadata->set("sharpen.status", status);
+}
+
+/* Register algorithm with the system. */
+static Algorithm *create(Controller *controller)
+{
+ return new Sharpen(controller);
+}
+static RegisterAlgorithm reg(NAME, &create);
diff --git a/src/ipa/rpi/controller/rpi/sharpen.h b/src/ipa/rpi/controller/rpi/sharpen.h
new file mode 100644
index 00000000..96ccd609
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/sharpen.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * sharpening control algorithm
+ */
+#pragma once
+
+#include "../sharpen_algorithm.h"
+#include "../sharpen_status.h"
+
+/* This is our implementation of the "sharpen algorithm". */
+
+namespace RPiController {
+
+class Sharpen : public SharpenAlgorithm
+{
+public:
+ Sharpen(Controller *controller);
+ char const *name() const override;
+ void switchMode(CameraMode const &cameraMode, Metadata *metadata) override;
+ int read(const libcamera::YamlObject &params) override;
+ void setStrength(double strength) override;
+ void prepare(Metadata *imageMetadata) override;
+
+private:
+ double threshold_;
+ double strength_;
+ double limit_;
+ double modeFactor_;
+ double userStrength_;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/rpi/tonemap.cpp b/src/ipa/rpi/controller/rpi/tonemap.cpp
new file mode 100644
index 00000000..0426e972
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/tonemap.cpp
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2022 Raspberry Pi Ltd
+ *
+ * Tonemap control algorithm
+ */
+#include "tonemap.h"
+
+#include <libcamera/base/log.h>
+
+#include "tonemap_status.h"
+
+using namespace RPiController;
+using namespace libcamera;
+
+LOG_DEFINE_CATEGORY(RPiTonemap)
+
+#define NAME "rpi.tonemap"
+
+Tonemap::Tonemap(Controller *controller)
+ : Algorithm(controller)
+{
+}
+
+char const *Tonemap::name() const
+{
+ return NAME;
+}
+
+int Tonemap::read(const libcamera::YamlObject &params)
+{
+ config_.detailConstant = params["detail_constant"].get<uint16_t>(0);
+ config_.detailSlope = params["detail_slope"].get<double>(0.1);
+ config_.iirStrength = params["iir_strength"].get<double>(1.0);
+ config_.strength = params["strength"].get<double>(1.0);
+ config_.tonemap.read(params["tone_curve"]);
+ return 0;
+}
+
+void Tonemap::initialise()
+{
+}
+
+void Tonemap::prepare(Metadata *imageMetadata)
+{
+ TonemapStatus tonemapStatus;
+
+ tonemapStatus.detailConstant = config_.detailConstant;
+ tonemapStatus.detailSlope = config_.detailSlope;
+ tonemapStatus.iirStrength = config_.iirStrength;
+ tonemapStatus.strength = config_.strength;
+ tonemapStatus.tonemap = config_.tonemap;
+ imageMetadata->set("tonemap.status", tonemapStatus);
+}
+
+// Register algorithm with the system.
+static Algorithm *Create(Controller *controller)
+{
+ return (Algorithm *)new Tonemap(controller);
+}
+static RegisterAlgorithm reg(NAME, &Create);
diff --git a/src/ipa/rpi/controller/rpi/tonemap.h b/src/ipa/rpi/controller/rpi/tonemap.h
new file mode 100644
index 00000000..f25aa47f
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/tonemap.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2022, Raspberry Pi Ltd
+ *
+ * Tonemap control algorithm
+ */
+#pragma once
+
+#include "algorithm.h"
+#include "pwl.h"
+
+namespace RPiController {
+
+struct TonemapConfig {
+ uint16_t detailConstant;
+ double detailSlope;
+ double iirStrength;
+ double strength;
+ Pwl tonemap;
+};
+
+class Tonemap : public Algorithm
+{
+public:
+ Tonemap(Controller *controller = NULL);
+ char const *name() const override;
+ int read(const libcamera::YamlObject &params) override;
+ void initialise() override;
+ void prepare(Metadata *imageMetadata) override;
+
+private:
+ TonemapConfig config_;
+};
+
+} // namespace RPiController
diff --git a/src/ipa/rpi/controller/saturation_status.h b/src/ipa/rpi/controller/saturation_status.h
new file mode 100644
index 00000000..c7fadc99
--- /dev/null
+++ b/src/ipa/rpi/controller/saturation_status.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2022 Raspberry Pi Ltd
+ *
+ * Saturation control algorithm status
+ */
+#pragma once
+
+struct SaturationStatus {
+ uint8_t shiftR;
+ uint8_t shiftG;
+ uint8_t shiftB;
+};
diff --git a/src/ipa/rpi/controller/sharpen_algorithm.h b/src/ipa/rpi/controller/sharpen_algorithm.h
new file mode 100644
index 00000000..abd82cb2
--- /dev/null
+++ b/src/ipa/rpi/controller/sharpen_algorithm.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * sharpness control algorithm interface
+ */
+#pragma once
+
+#include "algorithm.h"
+
+namespace RPiController {
+
+class SharpenAlgorithm : public Algorithm
+{
+public:
+ SharpenAlgorithm(Controller *controller) : Algorithm(controller) {}
+ /* A sharpness control algorithm must provide the following: */
+ virtual void setStrength(double strength) = 0;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/sharpen_status.h b/src/ipa/rpi/controller/sharpen_status.h
new file mode 100644
index 00000000..74910199
--- /dev/null
+++ b/src/ipa/rpi/controller/sharpen_status.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * Sharpen control algorithm status
+ */
+#pragma once
+
+/* The "sharpen" algorithm stores the strength to use. */
+
+struct SharpenStatus {
+ /* controls the smallest level of detail (or noise!) that sharpening will pick up */
+ double threshold;
+ /* the rate at which the sharpening response ramps once above the threshold */
+ double strength;
+ /* upper limit of the allowed sharpening response */
+ double limit;
+ /* The sharpening strength requested by the user or application. */
+ double userStrength;
+};
diff --git a/src/ipa/rpi/controller/statistics.h b/src/ipa/rpi/controller/statistics.h
new file mode 100644
index 00000000..cbd81161
--- /dev/null
+++ b/src/ipa/rpi/controller/statistics.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2022, Raspberry Pi Ltd
+ *
+ * Raspberry Pi generic statistics structure
+ */
+#pragma once
+
+#include <memory>
+#include <stdint.h>
+#include <vector>
+
+#include "histogram.h"
+#include "region_stats.h"
+
+namespace RPiController {
+
+struct RgbySums {
+ RgbySums(uint64_t _rSum = 0, uint64_t _gSum = 0, uint64_t _bSum = 0, uint64_t _ySum = 0)
+ : rSum(_rSum), gSum(_gSum), bSum(_bSum), ySum(_ySum)
+ {
+ }
+ uint64_t rSum;
+ uint64_t gSum;
+ uint64_t bSum;
+ uint64_t ySum;
+};
+
+using RgbyRegions = RegionStats<RgbySums>;
+using FocusRegions = RegionStats<uint64_t>;
+
+struct Statistics {
+ /*
+ * All region based statistics are normalised to 16-bits, giving a
+ * maximum value of (1 << NormalisationFactorPow2) - 1.
+ */
+ static constexpr unsigned int NormalisationFactorPow2 = 16;
+
+ /*
+ * Positioning of the AGC statistics gathering in the pipeline:
+ * Pre-WB correction or post-WB correction.
+ * Assume this is post-LSC.
+ */
+ enum class AgcStatsPos { PreWb, PostWb };
+ const AgcStatsPos agcStatsPos;
+
+ /*
+ * Positioning of the AWB/ALSC statistics gathering in the pipeline:
+ * Pre-LSC or post-LSC.
+ */
+ enum class ColourStatsPos { PreLsc, PostLsc };
+ const ColourStatsPos colourStatsPos;
+
+ Statistics(AgcStatsPos a, ColourStatsPos c)
+ : agcStatsPos(a), colourStatsPos(c)
+ {
+ }
+
+ /* Histogram statistics. Not all histograms may be populated! */
+ Histogram rHist;
+ Histogram gHist;
+ Histogram bHist;
+ Histogram yHist;
+
+ /* Row sums for flicker avoidance. */
+ std::vector<RgbySums> rowSums;
+
+ /* Region based colour sums. */
+ RgbyRegions agcRegions;
+ RgbyRegions awbRegions;
+
+ /* Region based focus FoM. */
+ FocusRegions focusRegions;
+};
+
+using StatisticsPtr = std::shared_ptr<Statistics>;
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/stitch_status.h b/src/ipa/rpi/controller/stitch_status.h
new file mode 100644
index 00000000..7812f3e3
--- /dev/null
+++ b/src/ipa/rpi/controller/stitch_status.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2023 Raspberry Pi Ltd
+ *
+ * stitch control algorithm status
+ */
+#pragma once
+
+/*
+ * Parameters for the stitch block.
+ */
+
+struct StitchStatus {
+ uint16_t thresholdLo;
+ uint8_t diffPower;
+ double motionThreshold;
+};
diff --git a/src/ipa/rpi/controller/tonemap_status.h b/src/ipa/rpi/controller/tonemap_status.h
new file mode 100644
index 00000000..41a7bf2f
--- /dev/null
+++ b/src/ipa/rpi/controller/tonemap_status.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2022 Raspberry Pi Ltd
+ *
+ * Tonemap control algorithm status
+ */
+#pragma once
+
+#include "pwl.h"
+
+struct TonemapStatus {
+ uint16_t detailConstant;
+ double detailSlope;
+ double iirStrength;
+ double strength;
+ RPiController::Pwl tonemap;
+};
diff --git a/src/ipa/rpi/meson.build b/src/ipa/rpi/meson.build
new file mode 100644
index 00000000..4811c76f
--- /dev/null
+++ b/src/ipa/rpi/meson.build
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: CC0-1.0
+
+subdir('cam_helper')
+subdir('common')
+subdir('controller')
+
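+# Build the platform-specific IPA for each enabled pipeline of the form 'rpi/...'.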
+foreach pipeline : pipelines
+ pipeline = pipeline.split('/')
+ if pipeline.length() < 2 or pipeline[0] != 'rpi'
+ continue
+ endif
+
+ subdir(pipeline[1])
+endforeach
diff --git a/src/ipa/rpi/vc4/data/imx219.json b/src/ipa/rpi/vc4/data/imx219.json
new file mode 100644
index 00000000..a020b12f
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/imx219.json
@@ -0,0 +1,695 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 4096
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 27685,
+ "reference_gain": 1.0,
+ "reference_aperture": 1.0,
+ "reference_lux": 998,
+ "reference_Y": 12744
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 0,
+ "reference_slope": 3.67
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 204,
+ "slope": 0.01633
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "priors": [
+ {
+ "lux": 0,
+ "prior":
+ [
+ 2000, 1.0,
+ 3000, 0.0,
+ 13000, 0.0
+ ]
+ },
+ {
+ "lux": 800,
+ "prior":
+ [
+ 2000, 0.0,
+ 6000, 2.0,
+ 13000, 2.0
+ ]
+ },
+ {
+ "lux": 1500,
+ "prior":
+ [
+ 2000, 0.0,
+ 4000, 1.0,
+ 6000, 6.0,
+ 6500, 7.0,
+ 7000, 1.0,
+ 13000, 1.0
+ ]
+ }
+ ],
+ "modes":
+ {
+ "auto":
+ {
+ "lo": 2500,
+ "hi": 8000
+ },
+ "incandescent":
+ {
+ "lo": 2500,
+ "hi": 3000
+ },
+ "tungsten":
+ {
+ "lo": 3000,
+ "hi": 3500
+ },
+ "fluorescent":
+ {
+ "lo": 4000,
+ "hi": 4700
+ },
+ "indoor":
+ {
+ "lo": 3000,
+ "hi": 5000
+ },
+ "daylight":
+ {
+ "lo": 5500,
+ "hi": 6500
+ },
+ "cloudy":
+ {
+ "lo": 7000,
+ "hi": 8600
+ }
+ },
+ "bayes": 1,
+ "ct_curve":
+ [
+ 2498.0, 0.9309, 0.3599,
+ 2911.0, 0.8682, 0.4283,
+ 2919.0, 0.8358, 0.4621,
+ 3627.0, 0.7646, 0.5327,
+ 4600.0, 0.6079, 0.6721,
+ 5716.0, 0.5712, 0.7017,
+ 8575.0, 0.4331, 0.8037
+ ],
+ "sensitivity_r": 1.05,
+ "sensitivity_b": 1.05,
+ "transverse_pos": 0.04791,
+ "transverse_neg": 0.04881
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "channels": [
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ },
+ {
+ "base_ev": 0.125,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ },
+ {
+ "base_ev": 1.5,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.7,
+ "calibrations_Cr": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 1.487, 1.481, 1.481, 1.445, 1.389, 1.327, 1.307, 1.307, 1.307, 1.309, 1.341, 1.405, 1.458, 1.494, 1.494, 1.497,
+ 1.491, 1.481, 1.448, 1.397, 1.331, 1.275, 1.243, 1.229, 1.229, 1.249, 1.287, 1.349, 1.409, 1.463, 1.494, 1.497,
+ 1.491, 1.469, 1.405, 1.331, 1.275, 1.217, 1.183, 1.172, 1.172, 1.191, 1.231, 1.287, 1.349, 1.424, 1.484, 1.499,
+ 1.487, 1.444, 1.363, 1.283, 1.217, 1.183, 1.148, 1.138, 1.138, 1.159, 1.191, 1.231, 1.302, 1.385, 1.461, 1.492,
+ 1.481, 1.423, 1.334, 1.253, 1.189, 1.148, 1.135, 1.119, 1.123, 1.137, 1.159, 1.203, 1.272, 1.358, 1.442, 1.488,
+ 1.479, 1.413, 1.321, 1.236, 1.176, 1.139, 1.118, 1.114, 1.116, 1.123, 1.149, 1.192, 1.258, 1.344, 1.432, 1.487,
+ 1.479, 1.413, 1.321, 1.236, 1.176, 1.139, 1.116, 1.114, 1.115, 1.123, 1.149, 1.192, 1.258, 1.344, 1.432, 1.487,
+ 1.479, 1.425, 1.336, 1.251, 1.189, 1.149, 1.136, 1.118, 1.121, 1.138, 1.158, 1.206, 1.275, 1.358, 1.443, 1.488,
+ 1.488, 1.448, 1.368, 1.285, 1.219, 1.189, 1.149, 1.139, 1.139, 1.158, 1.195, 1.235, 1.307, 1.387, 1.462, 1.493,
+ 1.496, 1.475, 1.411, 1.337, 1.284, 1.219, 1.189, 1.176, 1.176, 1.195, 1.235, 1.296, 1.356, 1.429, 1.487, 1.501,
+ 1.495, 1.489, 1.458, 1.407, 1.337, 1.287, 1.253, 1.239, 1.239, 1.259, 1.296, 1.356, 1.419, 1.472, 1.499, 1.499,
+ 1.494, 1.489, 1.489, 1.453, 1.398, 1.336, 1.317, 1.317, 1.317, 1.321, 1.351, 1.416, 1.467, 1.501, 1.501, 1.499
+ ]
+ },
+ {
+ "ct": 3850,
+ "table":
+ [
+ 1.694, 1.688, 1.688, 1.649, 1.588, 1.518, 1.495, 1.495, 1.495, 1.497, 1.532, 1.602, 1.659, 1.698, 1.698, 1.703,
+ 1.698, 1.688, 1.653, 1.597, 1.525, 1.464, 1.429, 1.413, 1.413, 1.437, 1.476, 1.542, 1.606, 1.665, 1.698, 1.703,
+ 1.697, 1.673, 1.605, 1.525, 1.464, 1.401, 1.369, 1.354, 1.354, 1.377, 1.417, 1.476, 1.542, 1.623, 1.687, 1.705,
+ 1.692, 1.646, 1.561, 1.472, 1.401, 1.368, 1.337, 1.323, 1.324, 1.348, 1.377, 1.417, 1.492, 1.583, 1.661, 1.697,
+ 1.686, 1.625, 1.528, 1.439, 1.372, 1.337, 1.321, 1.311, 1.316, 1.324, 1.348, 1.389, 1.461, 1.553, 1.642, 1.694,
+ 1.684, 1.613, 1.514, 1.423, 1.359, 1.328, 1.311, 1.306, 1.306, 1.316, 1.339, 1.378, 1.446, 1.541, 1.633, 1.693,
+ 1.684, 1.613, 1.514, 1.423, 1.359, 1.328, 1.311, 1.305, 1.305, 1.316, 1.339, 1.378, 1.446, 1.541, 1.633, 1.693,
+ 1.685, 1.624, 1.529, 1.438, 1.372, 1.336, 1.324, 1.309, 1.314, 1.323, 1.348, 1.392, 1.462, 1.555, 1.646, 1.694,
+ 1.692, 1.648, 1.561, 1.473, 1.403, 1.372, 1.336, 1.324, 1.324, 1.348, 1.378, 1.423, 1.495, 1.585, 1.667, 1.701,
+ 1.701, 1.677, 1.608, 1.527, 1.471, 1.403, 1.375, 1.359, 1.359, 1.378, 1.423, 1.488, 1.549, 1.631, 1.694, 1.709,
+ 1.702, 1.694, 1.656, 1.601, 1.527, 1.473, 1.441, 1.424, 1.424, 1.443, 1.488, 1.549, 1.621, 1.678, 1.706, 1.707,
+ 1.699, 1.694, 1.694, 1.654, 1.593, 1.525, 1.508, 1.508, 1.508, 1.509, 1.546, 1.614, 1.674, 1.708, 1.708, 1.707
+ ]
+ },
+ {
+ "ct": 6000,
+ "table":
+ [
+ 2.179, 2.176, 2.176, 2.125, 2.048, 1.975, 1.955, 1.954, 1.954, 1.956, 1.993, 2.071, 2.141, 2.184, 2.185, 2.188,
+ 2.189, 2.176, 2.128, 2.063, 1.973, 1.908, 1.872, 1.856, 1.856, 1.876, 1.922, 1.999, 2.081, 2.144, 2.184, 2.192,
+ 2.187, 2.152, 2.068, 1.973, 1.907, 1.831, 1.797, 1.786, 1.786, 1.804, 1.853, 1.922, 1.999, 2.089, 2.166, 2.191,
+ 2.173, 2.117, 2.013, 1.908, 1.831, 1.791, 1.755, 1.749, 1.749, 1.767, 1.804, 1.853, 1.939, 2.041, 2.135, 2.181,
+ 2.166, 2.089, 1.975, 1.869, 1.792, 1.755, 1.741, 1.731, 1.734, 1.749, 1.767, 1.818, 1.903, 2.005, 2.111, 2.173,
+ 2.165, 2.074, 1.956, 1.849, 1.777, 1.742, 1.729, 1.725, 1.729, 1.734, 1.758, 1.804, 1.884, 1.991, 2.099, 2.172,
+ 2.165, 2.074, 1.956, 1.849, 1.777, 1.742, 1.727, 1.724, 1.725, 1.734, 1.758, 1.804, 1.884, 1.991, 2.099, 2.172,
+ 2.166, 2.085, 1.975, 1.869, 1.791, 1.755, 1.741, 1.729, 1.733, 1.749, 1.769, 1.819, 1.904, 2.009, 2.114, 2.174,
+ 2.174, 2.118, 2.015, 1.913, 1.831, 1.791, 1.755, 1.749, 1.749, 1.769, 1.811, 1.855, 1.943, 2.047, 2.139, 2.183,
+ 2.187, 2.151, 2.072, 1.979, 1.911, 1.831, 1.801, 1.791, 1.791, 1.811, 1.855, 1.933, 2.006, 2.101, 2.173, 2.197,
+ 2.189, 2.178, 2.132, 2.069, 1.979, 1.913, 1.879, 1.867, 1.867, 1.891, 1.933, 2.006, 2.091, 2.156, 2.195, 2.197,
+ 2.181, 2.179, 2.178, 2.131, 2.057, 1.981, 1.965, 1.965, 1.965, 1.969, 1.999, 2.083, 2.153, 2.197, 2.197, 2.196
+ ]
+ }
+ ],
+ "calibrations_Cb": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 1.967, 1.961, 1.955, 1.953, 1.954, 1.957, 1.961, 1.963, 1.963, 1.961, 1.959, 1.957, 1.954, 1.951, 1.951, 1.955,
+ 1.961, 1.959, 1.957, 1.956, 1.962, 1.967, 1.975, 1.979, 1.979, 1.975, 1.971, 1.967, 1.957, 1.952, 1.951, 1.951,
+ 1.959, 1.959, 1.959, 1.966, 1.976, 1.989, 1.999, 2.004, 2.003, 1.997, 1.991, 1.981, 1.967, 1.956, 1.951, 1.951,
+ 1.959, 1.962, 1.967, 1.978, 1.993, 2.009, 2.021, 2.028, 2.026, 2.021, 2.011, 1.995, 1.981, 1.964, 1.953, 1.951,
+ 1.961, 1.965, 1.977, 1.993, 2.009, 2.023, 2.041, 2.047, 2.047, 2.037, 2.024, 2.011, 1.995, 1.975, 1.958, 1.953,
+ 1.963, 1.968, 1.981, 2.001, 2.019, 2.039, 2.046, 2.052, 2.052, 2.051, 2.035, 2.021, 2.001, 1.978, 1.959, 1.955,
+ 1.961, 1.966, 1.981, 2.001, 2.019, 2.038, 2.043, 2.051, 2.052, 2.042, 2.034, 2.019, 2.001, 1.978, 1.959, 1.954,
+ 1.957, 1.961, 1.972, 1.989, 2.003, 2.021, 2.038, 2.039, 2.039, 2.034, 2.019, 2.004, 1.988, 1.971, 1.954, 1.949,
+ 1.952, 1.953, 1.959, 1.972, 1.989, 2.003, 2.016, 2.019, 2.019, 2.014, 2.003, 1.988, 1.971, 1.955, 1.948, 1.947,
+ 1.949, 1.948, 1.949, 1.957, 1.971, 1.978, 1.991, 1.994, 1.994, 1.989, 1.979, 1.967, 1.954, 1.946, 1.947, 1.947,
+ 1.949, 1.946, 1.944, 1.946, 1.949, 1.954, 1.962, 1.967, 1.967, 1.963, 1.956, 1.948, 1.943, 1.943, 1.946, 1.949,
+ 1.951, 1.946, 1.944, 1.942, 1.943, 1.943, 1.947, 1.948, 1.949, 1.947, 1.945, 1.941, 1.938, 1.939, 1.948, 1.952
+ ]
+ },
+ {
+ "ct": 3850,
+ "table":
+ [
+ 1.726, 1.724, 1.722, 1.723, 1.731, 1.735, 1.743, 1.746, 1.746, 1.741, 1.735, 1.729, 1.725, 1.721, 1.721, 1.721,
+ 1.724, 1.723, 1.723, 1.727, 1.735, 1.744, 1.749, 1.756, 1.756, 1.749, 1.744, 1.735, 1.727, 1.719, 1.719, 1.719,
+ 1.723, 1.723, 1.724, 1.735, 1.746, 1.759, 1.767, 1.775, 1.775, 1.766, 1.758, 1.746, 1.735, 1.723, 1.718, 1.716,
+ 1.723, 1.725, 1.732, 1.746, 1.759, 1.775, 1.782, 1.792, 1.792, 1.782, 1.772, 1.759, 1.745, 1.729, 1.718, 1.716,
+ 1.725, 1.729, 1.738, 1.756, 1.775, 1.785, 1.796, 1.803, 1.804, 1.794, 1.783, 1.772, 1.757, 1.736, 1.722, 1.718,
+ 1.728, 1.731, 1.741, 1.759, 1.781, 1.795, 1.803, 1.806, 1.808, 1.805, 1.791, 1.779, 1.762, 1.739, 1.722, 1.721,
+ 1.727, 1.731, 1.741, 1.759, 1.781, 1.791, 1.799, 1.804, 1.806, 1.801, 1.791, 1.779, 1.762, 1.739, 1.722, 1.717,
+ 1.722, 1.724, 1.733, 1.751, 1.768, 1.781, 1.791, 1.796, 1.799, 1.791, 1.781, 1.766, 1.754, 1.731, 1.717, 1.714,
+ 1.718, 1.718, 1.724, 1.737, 1.752, 1.768, 1.776, 1.782, 1.784, 1.781, 1.766, 1.754, 1.737, 1.724, 1.713, 1.709,
+ 1.716, 1.715, 1.716, 1.725, 1.737, 1.749, 1.756, 1.763, 1.764, 1.762, 1.749, 1.737, 1.724, 1.717, 1.709, 1.708,
+ 1.715, 1.714, 1.712, 1.715, 1.722, 1.729, 1.736, 1.741, 1.742, 1.739, 1.731, 1.723, 1.717, 1.712, 1.711, 1.709,
+ 1.716, 1.714, 1.711, 1.712, 1.715, 1.719, 1.723, 1.728, 1.731, 1.729, 1.723, 1.718, 1.711, 1.711, 1.713, 1.713
+ ]
+ },
+ {
+ "ct": 6000,
+ "table":
+ [
+ 1.374, 1.372, 1.373, 1.374, 1.375, 1.378, 1.378, 1.381, 1.382, 1.382, 1.378, 1.373, 1.372, 1.369, 1.365, 1.365,
+ 1.371, 1.371, 1.372, 1.374, 1.378, 1.381, 1.384, 1.386, 1.388, 1.387, 1.384, 1.377, 1.372, 1.368, 1.364, 1.362,
+ 1.369, 1.371, 1.372, 1.377, 1.383, 1.391, 1.394, 1.396, 1.397, 1.395, 1.391, 1.382, 1.374, 1.369, 1.362, 1.361,
+ 1.369, 1.371, 1.375, 1.383, 1.391, 1.399, 1.402, 1.404, 1.405, 1.403, 1.398, 1.391, 1.379, 1.371, 1.363, 1.361,
+ 1.371, 1.373, 1.378, 1.388, 1.399, 1.407, 1.411, 1.413, 1.413, 1.411, 1.405, 1.397, 1.385, 1.374, 1.366, 1.362,
+ 1.371, 1.374, 1.379, 1.389, 1.405, 1.411, 1.414, 1.414, 1.415, 1.415, 1.411, 1.401, 1.388, 1.376, 1.367, 1.363,
+ 1.371, 1.373, 1.379, 1.389, 1.405, 1.408, 1.413, 1.414, 1.414, 1.413, 1.409, 1.401, 1.388, 1.376, 1.367, 1.362,
+ 1.366, 1.369, 1.374, 1.384, 1.396, 1.404, 1.407, 1.408, 1.408, 1.408, 1.401, 1.395, 1.382, 1.371, 1.363, 1.359,
+ 1.364, 1.365, 1.368, 1.375, 1.386, 1.396, 1.399, 1.401, 1.399, 1.399, 1.395, 1.385, 1.374, 1.365, 1.359, 1.357,
+ 1.361, 1.363, 1.365, 1.368, 1.377, 1.384, 1.388, 1.391, 1.391, 1.388, 1.385, 1.375, 1.366, 1.361, 1.358, 1.356,
+ 1.361, 1.362, 1.362, 1.364, 1.367, 1.373, 1.376, 1.377, 1.377, 1.375, 1.373, 1.366, 1.362, 1.358, 1.358, 1.358,
+ 1.361, 1.362, 1.362, 1.362, 1.363, 1.367, 1.369, 1.368, 1.367, 1.367, 1.367, 1.364, 1.358, 1.357, 1.358, 1.359
+ ]
+ }
+ ],
+ "luminance_lut":
+ [
+ 2.716, 2.568, 2.299, 2.065, 1.845, 1.693, 1.605, 1.597, 1.596, 1.634, 1.738, 1.914, 2.145, 2.394, 2.719, 2.901,
+ 2.593, 2.357, 2.093, 1.876, 1.672, 1.528, 1.438, 1.393, 1.394, 1.459, 1.569, 1.731, 1.948, 2.169, 2.481, 2.756,
+ 2.439, 2.197, 1.922, 1.691, 1.521, 1.365, 1.266, 1.222, 1.224, 1.286, 1.395, 1.573, 1.747, 1.988, 2.299, 2.563,
+ 2.363, 2.081, 1.797, 1.563, 1.376, 1.244, 1.152, 1.099, 1.101, 1.158, 1.276, 1.421, 1.607, 1.851, 2.163, 2.455,
+ 2.342, 2.003, 1.715, 1.477, 1.282, 1.152, 1.074, 1.033, 1.035, 1.083, 1.163, 1.319, 1.516, 1.759, 2.064, 2.398,
+ 2.342, 1.985, 1.691, 1.446, 1.249, 1.111, 1.034, 1.004, 1.004, 1.028, 1.114, 1.274, 1.472, 1.716, 2.019, 2.389,
+ 2.342, 1.991, 1.691, 1.446, 1.249, 1.112, 1.034, 1.011, 1.005, 1.035, 1.114, 1.274, 1.472, 1.716, 2.019, 2.389,
+ 2.365, 2.052, 1.751, 1.499, 1.299, 1.171, 1.089, 1.039, 1.042, 1.084, 1.162, 1.312, 1.516, 1.761, 2.059, 2.393,
+ 2.434, 2.159, 1.856, 1.601, 1.403, 1.278, 1.166, 1.114, 1.114, 1.162, 1.266, 1.402, 1.608, 1.847, 2.146, 2.435,
+ 2.554, 2.306, 2.002, 1.748, 1.563, 1.396, 1.299, 1.247, 1.243, 1.279, 1.386, 1.551, 1.746, 1.977, 2.272, 2.518,
+ 2.756, 2.493, 2.195, 1.947, 1.739, 1.574, 1.481, 1.429, 1.421, 1.457, 1.559, 1.704, 1.929, 2.159, 2.442, 2.681,
+ 2.935, 2.739, 2.411, 2.151, 1.922, 1.749, 1.663, 1.628, 1.625, 1.635, 1.716, 1.872, 2.113, 2.368, 2.663, 2.824
+ ],
+ "sigma": 0.00381,
+ "sigma_Cb": 0.00216
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 2860,
+ "ccm":
+ [
+ 2.12089, -0.52461, -0.59629,
+ -0.85342, 2.80445, -0.95103,
+ -0.26897, -1.14788, 2.41685
+ ]
+ },
+ {
+ "ct": 2960,
+ "ccm":
+ [
+ 2.26962, -0.54174, -0.72789,
+ -0.77008, 2.60271, -0.83262,
+ -0.26036, -1.51254, 2.77289
+ ]
+ },
+ {
+ "ct": 3603,
+ "ccm":
+ [
+ 2.18644, -0.66148, -0.52496,
+ -0.77828, 2.69474, -0.91645,
+ -0.25239, -0.83059, 2.08298
+ ]
+ },
+ {
+ "ct": 4650,
+ "ccm":
+ [
+ 2.18174, -0.70887, -0.47287,
+ -0.70196, 2.76426, -1.06231,
+ -0.25157, -0.71978, 1.97135
+ ]
+ },
+ {
+ "ct": 5858,
+ "ccm":
+ [
+ 2.32392, -0.88421, -0.43971,
+ -0.63821, 2.58348, -0.94527,
+ -0.28541, -0.54112, 1.82653
+ ]
+ },
+ {
+ "ct": 7580,
+ "ccm":
+ [
+ 2.21175, -0.53242, -0.67933,
+ -0.57875, 3.07922, -1.50047,
+ -0.27709, -0.73338, 2.01048
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.sharpen": { }
+ },
+ {
+ "rpi.hdr":
+ {
+ "MultiExposureUnmerged":
+ {
+ "cadence": [ 1, 2 ],
+ "channel_map":
+ {
+ "short": 1,
+ "long": 2
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/src/ipa/rpi/vc4/data/imx219_noir.json b/src/ipa/rpi/vc4/data/imx219_noir.json
new file mode 100644
index 00000000..d8bc9639
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/imx219_noir.json
@@ -0,0 +1,629 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 4096
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 27685,
+ "reference_gain": 1.0,
+ "reference_aperture": 1.0,
+ "reference_lux": 998,
+ "reference_Y": 12744
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 0,
+ "reference_slope": 3.67
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 204,
+ "slope": 0.01633
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "bayes": 0
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "channels": [
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ },
+ {
+ "base_ev": 0.125,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ },
+ {
+ "base_ev": 1.5,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.7,
+ "calibrations_Cr": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 1.487, 1.481, 1.481, 1.445, 1.389, 1.327, 1.307, 1.307, 1.307, 1.309, 1.341, 1.405, 1.458, 1.494, 1.494, 1.497,
+ 1.491, 1.481, 1.448, 1.397, 1.331, 1.275, 1.243, 1.229, 1.229, 1.249, 1.287, 1.349, 1.409, 1.463, 1.494, 1.497,
+ 1.491, 1.469, 1.405, 1.331, 1.275, 1.217, 1.183, 1.172, 1.172, 1.191, 1.231, 1.287, 1.349, 1.424, 1.484, 1.499,
+ 1.487, 1.444, 1.363, 1.283, 1.217, 1.183, 1.148, 1.138, 1.138, 1.159, 1.191, 1.231, 1.302, 1.385, 1.461, 1.492,
+ 1.481, 1.423, 1.334, 1.253, 1.189, 1.148, 1.135, 1.119, 1.123, 1.137, 1.159, 1.203, 1.272, 1.358, 1.442, 1.488,
+ 1.479, 1.413, 1.321, 1.236, 1.176, 1.139, 1.118, 1.114, 1.116, 1.123, 1.149, 1.192, 1.258, 1.344, 1.432, 1.487,
+ 1.479, 1.413, 1.321, 1.236, 1.176, 1.139, 1.116, 1.114, 1.115, 1.123, 1.149, 1.192, 1.258, 1.344, 1.432, 1.487,
+ 1.479, 1.425, 1.336, 1.251, 1.189, 1.149, 1.136, 1.118, 1.121, 1.138, 1.158, 1.206, 1.275, 1.358, 1.443, 1.488,
+ 1.488, 1.448, 1.368, 1.285, 1.219, 1.189, 1.149, 1.139, 1.139, 1.158, 1.195, 1.235, 1.307, 1.387, 1.462, 1.493,
+ 1.496, 1.475, 1.411, 1.337, 1.284, 1.219, 1.189, 1.176, 1.176, 1.195, 1.235, 1.296, 1.356, 1.429, 1.487, 1.501,
+ 1.495, 1.489, 1.458, 1.407, 1.337, 1.287, 1.253, 1.239, 1.239, 1.259, 1.296, 1.356, 1.419, 1.472, 1.499, 1.499,
+ 1.494, 1.489, 1.489, 1.453, 1.398, 1.336, 1.317, 1.317, 1.317, 1.321, 1.351, 1.416, 1.467, 1.501, 1.501, 1.499
+ ]
+ },
+ {
+ "ct": 3850,
+ "table":
+ [
+ 1.694, 1.688, 1.688, 1.649, 1.588, 1.518, 1.495, 1.495, 1.495, 1.497, 1.532, 1.602, 1.659, 1.698, 1.698, 1.703,
+ 1.698, 1.688, 1.653, 1.597, 1.525, 1.464, 1.429, 1.413, 1.413, 1.437, 1.476, 1.542, 1.606, 1.665, 1.698, 1.703,
+ 1.697, 1.673, 1.605, 1.525, 1.464, 1.401, 1.369, 1.354, 1.354, 1.377, 1.417, 1.476, 1.542, 1.623, 1.687, 1.705,
+ 1.692, 1.646, 1.561, 1.472, 1.401, 1.368, 1.337, 1.323, 1.324, 1.348, 1.377, 1.417, 1.492, 1.583, 1.661, 1.697,
+ 1.686, 1.625, 1.528, 1.439, 1.372, 1.337, 1.321, 1.311, 1.316, 1.324, 1.348, 1.389, 1.461, 1.553, 1.642, 1.694,
+ 1.684, 1.613, 1.514, 1.423, 1.359, 1.328, 1.311, 1.306, 1.306, 1.316, 1.339, 1.378, 1.446, 1.541, 1.633, 1.693,
+ 1.684, 1.613, 1.514, 1.423, 1.359, 1.328, 1.311, 1.305, 1.305, 1.316, 1.339, 1.378, 1.446, 1.541, 1.633, 1.693,
+ 1.685, 1.624, 1.529, 1.438, 1.372, 1.336, 1.324, 1.309, 1.314, 1.323, 1.348, 1.392, 1.462, 1.555, 1.646, 1.694,
+ 1.692, 1.648, 1.561, 1.473, 1.403, 1.372, 1.336, 1.324, 1.324, 1.348, 1.378, 1.423, 1.495, 1.585, 1.667, 1.701,
+ 1.701, 1.677, 1.608, 1.527, 1.471, 1.403, 1.375, 1.359, 1.359, 1.378, 1.423, 1.488, 1.549, 1.631, 1.694, 1.709,
+ 1.702, 1.694, 1.656, 1.601, 1.527, 1.473, 1.441, 1.424, 1.424, 1.443, 1.488, 1.549, 1.621, 1.678, 1.706, 1.707,
+ 1.699, 1.694, 1.694, 1.654, 1.593, 1.525, 1.508, 1.508, 1.508, 1.509, 1.546, 1.614, 1.674, 1.708, 1.708, 1.707
+ ]
+ },
+ {
+ "ct": 6000,
+ "table":
+ [
+ 2.179, 2.176, 2.176, 2.125, 2.048, 1.975, 1.955, 1.954, 1.954, 1.956, 1.993, 2.071, 2.141, 2.184, 2.185, 2.188,
+ 2.189, 2.176, 2.128, 2.063, 1.973, 1.908, 1.872, 1.856, 1.856, 1.876, 1.922, 1.999, 2.081, 2.144, 2.184, 2.192,
+ 2.187, 2.152, 2.068, 1.973, 1.907, 1.831, 1.797, 1.786, 1.786, 1.804, 1.853, 1.922, 1.999, 2.089, 2.166, 2.191,
+ 2.173, 2.117, 2.013, 1.908, 1.831, 1.791, 1.755, 1.749, 1.749, 1.767, 1.804, 1.853, 1.939, 2.041, 2.135, 2.181,
+ 2.166, 2.089, 1.975, 1.869, 1.792, 1.755, 1.741, 1.731, 1.734, 1.749, 1.767, 1.818, 1.903, 2.005, 2.111, 2.173,
+ 2.165, 2.074, 1.956, 1.849, 1.777, 1.742, 1.729, 1.725, 1.729, 1.734, 1.758, 1.804, 1.884, 1.991, 2.099, 2.172,
+ 2.165, 2.074, 1.956, 1.849, 1.777, 1.742, 1.727, 1.724, 1.725, 1.734, 1.758, 1.804, 1.884, 1.991, 2.099, 2.172,
+ 2.166, 2.085, 1.975, 1.869, 1.791, 1.755, 1.741, 1.729, 1.733, 1.749, 1.769, 1.819, 1.904, 2.009, 2.114, 2.174,
+ 2.174, 2.118, 2.015, 1.913, 1.831, 1.791, 1.755, 1.749, 1.749, 1.769, 1.811, 1.855, 1.943, 2.047, 2.139, 2.183,
+ 2.187, 2.151, 2.072, 1.979, 1.911, 1.831, 1.801, 1.791, 1.791, 1.811, 1.855, 1.933, 2.006, 2.101, 2.173, 2.197,
+ 2.189, 2.178, 2.132, 2.069, 1.979, 1.913, 1.879, 1.867, 1.867, 1.891, 1.933, 2.006, 2.091, 2.156, 2.195, 2.197,
+ 2.181, 2.179, 2.178, 2.131, 2.057, 1.981, 1.965, 1.965, 1.965, 1.969, 1.999, 2.083, 2.153, 2.197, 2.197, 2.196
+ ]
+ }
+ ],
+ "calibrations_Cb": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 1.967, 1.961, 1.955, 1.953, 1.954, 1.957, 1.961, 1.963, 1.963, 1.961, 1.959, 1.957, 1.954, 1.951, 1.951, 1.955,
+ 1.961, 1.959, 1.957, 1.956, 1.962, 1.967, 1.975, 1.979, 1.979, 1.975, 1.971, 1.967, 1.957, 1.952, 1.951, 1.951,
+ 1.959, 1.959, 1.959, 1.966, 1.976, 1.989, 1.999, 2.004, 2.003, 1.997, 1.991, 1.981, 1.967, 1.956, 1.951, 1.951,
+ 1.959, 1.962, 1.967, 1.978, 1.993, 2.009, 2.021, 2.028, 2.026, 2.021, 2.011, 1.995, 1.981, 1.964, 1.953, 1.951,
+ 1.961, 1.965, 1.977, 1.993, 2.009, 2.023, 2.041, 2.047, 2.047, 2.037, 2.024, 2.011, 1.995, 1.975, 1.958, 1.953,
+ 1.963, 1.968, 1.981, 2.001, 2.019, 2.039, 2.046, 2.052, 2.052, 2.051, 2.035, 2.021, 2.001, 1.978, 1.959, 1.955,
+ 1.961, 1.966, 1.981, 2.001, 2.019, 2.038, 2.043, 2.051, 2.052, 2.042, 2.034, 2.019, 2.001, 1.978, 1.959, 1.954,
+ 1.957, 1.961, 1.972, 1.989, 2.003, 2.021, 2.038, 2.039, 2.039, 2.034, 2.019, 2.004, 1.988, 1.971, 1.954, 1.949,
+ 1.952, 1.953, 1.959, 1.972, 1.989, 2.003, 2.016, 2.019, 2.019, 2.014, 2.003, 1.988, 1.971, 1.955, 1.948, 1.947,
+ 1.949, 1.948, 1.949, 1.957, 1.971, 1.978, 1.991, 1.994, 1.994, 1.989, 1.979, 1.967, 1.954, 1.946, 1.947, 1.947,
+ 1.949, 1.946, 1.944, 1.946, 1.949, 1.954, 1.962, 1.967, 1.967, 1.963, 1.956, 1.948, 1.943, 1.943, 1.946, 1.949,
+ 1.951, 1.946, 1.944, 1.942, 1.943, 1.943, 1.947, 1.948, 1.949, 1.947, 1.945, 1.941, 1.938, 1.939, 1.948, 1.952
+ ]
+ },
+ {
+ "ct": 3850,
+ "table":
+ [
+ 1.726, 1.724, 1.722, 1.723, 1.731, 1.735, 1.743, 1.746, 1.746, 1.741, 1.735, 1.729, 1.725, 1.721, 1.721, 1.721,
+ 1.724, 1.723, 1.723, 1.727, 1.735, 1.744, 1.749, 1.756, 1.756, 1.749, 1.744, 1.735, 1.727, 1.719, 1.719, 1.719,
+ 1.723, 1.723, 1.724, 1.735, 1.746, 1.759, 1.767, 1.775, 1.775, 1.766, 1.758, 1.746, 1.735, 1.723, 1.718, 1.716,
+ 1.723, 1.725, 1.732, 1.746, 1.759, 1.775, 1.782, 1.792, 1.792, 1.782, 1.772, 1.759, 1.745, 1.729, 1.718, 1.716,
+ 1.725, 1.729, 1.738, 1.756, 1.775, 1.785, 1.796, 1.803, 1.804, 1.794, 1.783, 1.772, 1.757, 1.736, 1.722, 1.718,
+ 1.728, 1.731, 1.741, 1.759, 1.781, 1.795, 1.803, 1.806, 1.808, 1.805, 1.791, 1.779, 1.762, 1.739, 1.722, 1.721,
+ 1.727, 1.731, 1.741, 1.759, 1.781, 1.791, 1.799, 1.804, 1.806, 1.801, 1.791, 1.779, 1.762, 1.739, 1.722, 1.717,
+ 1.722, 1.724, 1.733, 1.751, 1.768, 1.781, 1.791, 1.796, 1.799, 1.791, 1.781, 1.766, 1.754, 1.731, 1.717, 1.714,
+ 1.718, 1.718, 1.724, 1.737, 1.752, 1.768, 1.776, 1.782, 1.784, 1.781, 1.766, 1.754, 1.737, 1.724, 1.713, 1.709,
+ 1.716, 1.715, 1.716, 1.725, 1.737, 1.749, 1.756, 1.763, 1.764, 1.762, 1.749, 1.737, 1.724, 1.717, 1.709, 1.708,
+ 1.715, 1.714, 1.712, 1.715, 1.722, 1.729, 1.736, 1.741, 1.742, 1.739, 1.731, 1.723, 1.717, 1.712, 1.711, 1.709,
+ 1.716, 1.714, 1.711, 1.712, 1.715, 1.719, 1.723, 1.728, 1.731, 1.729, 1.723, 1.718, 1.711, 1.711, 1.713, 1.713
+ ]
+ },
+ {
+ "ct": 6000,
+ "table":
+ [
+ 1.374, 1.372, 1.373, 1.374, 1.375, 1.378, 1.378, 1.381, 1.382, 1.382, 1.378, 1.373, 1.372, 1.369, 1.365, 1.365,
+ 1.371, 1.371, 1.372, 1.374, 1.378, 1.381, 1.384, 1.386, 1.388, 1.387, 1.384, 1.377, 1.372, 1.368, 1.364, 1.362,
+ 1.369, 1.371, 1.372, 1.377, 1.383, 1.391, 1.394, 1.396, 1.397, 1.395, 1.391, 1.382, 1.374, 1.369, 1.362, 1.361,
+ 1.369, 1.371, 1.375, 1.383, 1.391, 1.399, 1.402, 1.404, 1.405, 1.403, 1.398, 1.391, 1.379, 1.371, 1.363, 1.361,
+ 1.371, 1.373, 1.378, 1.388, 1.399, 1.407, 1.411, 1.413, 1.413, 1.411, 1.405, 1.397, 1.385, 1.374, 1.366, 1.362,
+ 1.371, 1.374, 1.379, 1.389, 1.405, 1.411, 1.414, 1.414, 1.415, 1.415, 1.411, 1.401, 1.388, 1.376, 1.367, 1.363,
+ 1.371, 1.373, 1.379, 1.389, 1.405, 1.408, 1.413, 1.414, 1.414, 1.413, 1.409, 1.401, 1.388, 1.376, 1.367, 1.362,
+ 1.366, 1.369, 1.374, 1.384, 1.396, 1.404, 1.407, 1.408, 1.408, 1.408, 1.401, 1.395, 1.382, 1.371, 1.363, 1.359,
+ 1.364, 1.365, 1.368, 1.375, 1.386, 1.396, 1.399, 1.401, 1.399, 1.399, 1.395, 1.385, 1.374, 1.365, 1.359, 1.357,
+ 1.361, 1.363, 1.365, 1.368, 1.377, 1.384, 1.388, 1.391, 1.391, 1.388, 1.385, 1.375, 1.366, 1.361, 1.358, 1.356,
+ 1.361, 1.362, 1.362, 1.364, 1.367, 1.373, 1.376, 1.377, 1.377, 1.375, 1.373, 1.366, 1.362, 1.358, 1.358, 1.358,
+ 1.361, 1.362, 1.362, 1.362, 1.363, 1.367, 1.369, 1.368, 1.367, 1.367, 1.367, 1.364, 1.358, 1.357, 1.358, 1.359
+ ]
+ }
+ ],
+ "luminance_lut":
+ [
+ 2.716, 2.568, 2.299, 2.065, 1.845, 1.693, 1.605, 1.597, 1.596, 1.634, 1.738, 1.914, 2.145, 2.394, 2.719, 2.901,
+ 2.593, 2.357, 2.093, 1.876, 1.672, 1.528, 1.438, 1.393, 1.394, 1.459, 1.569, 1.731, 1.948, 2.169, 2.481, 2.756,
+ 2.439, 2.197, 1.922, 1.691, 1.521, 1.365, 1.266, 1.222, 1.224, 1.286, 1.395, 1.573, 1.747, 1.988, 2.299, 2.563,
+ 2.363, 2.081, 1.797, 1.563, 1.376, 1.244, 1.152, 1.099, 1.101, 1.158, 1.276, 1.421, 1.607, 1.851, 2.163, 2.455,
+ 2.342, 2.003, 1.715, 1.477, 1.282, 1.152, 1.074, 1.033, 1.035, 1.083, 1.163, 1.319, 1.516, 1.759, 2.064, 2.398,
+ 2.342, 1.985, 1.691, 1.446, 1.249, 1.111, 1.034, 1.004, 1.004, 1.028, 1.114, 1.274, 1.472, 1.716, 2.019, 2.389,
+ 2.342, 1.991, 1.691, 1.446, 1.249, 1.112, 1.034, 1.011, 1.005, 1.035, 1.114, 1.274, 1.472, 1.716, 2.019, 2.389,
+ 2.365, 2.052, 1.751, 1.499, 1.299, 1.171, 1.089, 1.039, 1.042, 1.084, 1.162, 1.312, 1.516, 1.761, 2.059, 2.393,
+ 2.434, 2.159, 1.856, 1.601, 1.403, 1.278, 1.166, 1.114, 1.114, 1.162, 1.266, 1.402, 1.608, 1.847, 2.146, 2.435,
+ 2.554, 2.306, 2.002, 1.748, 1.563, 1.396, 1.299, 1.247, 1.243, 1.279, 1.386, 1.551, 1.746, 1.977, 2.272, 2.518,
+ 2.756, 2.493, 2.195, 1.947, 1.739, 1.574, 1.481, 1.429, 1.421, 1.457, 1.559, 1.704, 1.929, 2.159, 2.442, 2.681,
+ 2.935, 2.739, 2.411, 2.151, 1.922, 1.749, 1.663, 1.628, 1.625, 1.635, 1.716, 1.872, 2.113, 2.368, 2.663, 2.824
+ ],
+ "sigma": 0.00381,
+ "sigma_Cb": 0.00216
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 2498,
+ "ccm":
+ [
+ 1.58731, -0.18011, -0.40721,
+ -0.60639, 2.03422, -0.42782,
+ -0.19612, -1.69203, 2.88815
+ ]
+ },
+ {
+ "ct": 2811,
+ "ccm":
+ [
+ 1.61593, -0.33164, -0.28429,
+ -0.55048, 1.97779, -0.42731,
+ -0.12042, -1.42847, 2.54889
+ ]
+ },
+ {
+ "ct": 2911,
+ "ccm":
+ [
+ 1.62771, -0.41282, -0.21489,
+ -0.57991, 2.04176, -0.46186,
+ -0.07613, -1.13359, 2.20972
+ ]
+ },
+ {
+ "ct": 2919,
+ "ccm":
+ [
+ 1.62661, -0.37736, -0.24925,
+ -0.52519, 1.95233, -0.42714,
+ -0.10842, -1.34929, 2.45771
+ ]
+ },
+ {
+ "ct": 3627,
+ "ccm":
+ [
+ 1.70385, -0.57231, -0.13154,
+ -0.47763, 1.85998, -0.38235,
+ -0.07467, -0.82678, 1.90145
+ ]
+ },
+ {
+ "ct": 4600,
+ "ccm":
+ [
+ 1.68486, -0.61085, -0.07402,
+ -0.41927, 2.04016, -0.62089,
+ -0.08633, -0.67672, 1.76305
+ ]
+ },
+ {
+ "ct": 5716,
+ "ccm":
+ [
+ 1.80439, -0.73699, -0.06739,
+ -0.36073, 1.83327, -0.47255,
+ -0.08378, -0.56403, 1.64781
+ ]
+ },
+ {
+ "ct": 8575,
+ "ccm":
+ [
+ 1.89357, -0.76427, -0.12931,
+ -0.27399, 2.15605, -0.88206,
+ -0.12035, -0.68256, 1.80292
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.sharpen": { }
+ },
+ {
+ "rpi.hdr":
+ {
+ "MultiExposureUnmerged":
+ {
+ "cadence": [ 1, 2 ],
+ "channel_map":
+ {
+ "short": 1,
+ "long": 2
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/src/ipa/rpi/vc4/data/imx290.json b/src/ipa/rpi/vc4/data/imx290.json
new file mode 100644
index 00000000..8f41bf51
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/imx290.json
@@ -0,0 +1,214 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 3840
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 6813,
+ "reference_gain": 1.0,
+ "reference_aperture": 1.0,
+ "reference_lux": 890,
+ "reference_Y": 12900
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 0,
+ "reference_slope": 2.67
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 187,
+ "slope": 0.00842
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "bayes": 0
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "speed": 0.2,
+ "metering_modes":
+ {
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ },
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 10, 30000, 60000 ],
+ "gain": [ 1.0, 2.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 10, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.16,
+ 10000, 0.16
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.7,
+ "luminance_lut":
+ [
+ 2.844, 2.349, 2.018, 1.775, 1.599, 1.466, 1.371, 1.321, 1.306, 1.316, 1.357, 1.439, 1.552, 1.705, 1.915, 2.221,
+ 2.576, 2.151, 1.851, 1.639, 1.478, 1.358, 1.272, 1.231, 1.218, 1.226, 1.262, 1.335, 1.438, 1.571, 1.766, 2.067,
+ 2.381, 2.005, 1.739, 1.545, 1.389, 1.278, 1.204, 1.166, 1.153, 1.161, 1.194, 1.263, 1.356, 1.489, 1.671, 1.943,
+ 2.242, 1.899, 1.658, 1.481, 1.329, 1.225, 1.156, 1.113, 1.096, 1.107, 1.143, 1.201, 1.289, 1.423, 1.607, 1.861,
+ 2.152, 1.831, 1.602, 1.436, 1.291, 1.193, 1.121, 1.069, 1.047, 1.062, 1.107, 1.166, 1.249, 1.384, 1.562, 1.801,
+ 2.104, 1.795, 1.572, 1.407, 1.269, 1.174, 1.099, 1.041, 1.008, 1.029, 1.083, 1.146, 1.232, 1.364, 1.547, 1.766,
+ 2.104, 1.796, 1.572, 1.403, 1.264, 1.171, 1.097, 1.036, 1.001, 1.025, 1.077, 1.142, 1.231, 1.363, 1.549, 1.766,
+ 2.148, 1.827, 1.594, 1.413, 1.276, 1.184, 1.114, 1.062, 1.033, 1.049, 1.092, 1.153, 1.242, 1.383, 1.577, 1.795,
+ 2.211, 1.881, 1.636, 1.455, 1.309, 1.214, 1.149, 1.104, 1.081, 1.089, 1.125, 1.184, 1.273, 1.423, 1.622, 1.846,
+ 2.319, 1.958, 1.698, 1.516, 1.362, 1.262, 1.203, 1.156, 1.137, 1.142, 1.171, 1.229, 1.331, 1.484, 1.682, 1.933,
+ 2.459, 2.072, 1.789, 1.594, 1.441, 1.331, 1.261, 1.219, 1.199, 1.205, 1.232, 1.301, 1.414, 1.571, 1.773, 2.052,
+ 2.645, 2.206, 1.928, 1.728, 1.559, 1.451, 1.352, 1.301, 1.282, 1.289, 1.319, 1.395, 1.519, 1.685, 1.904, 2.227
+ ],
+ "sigma": 0.005,
+ "sigma_Cb": 0.005
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.sharpen": { }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 3900,
+ "ccm":
+ [
+ 1.54659, -0.17707, -0.36953,
+ -0.51471, 1.72733, -0.21262,
+ 0.06667, -0.92279, 1.85612
+ ]
+ }
+ ]
+ }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/src/ipa/rpi/vc4/data/imx296.json b/src/ipa/rpi/vc4/data/imx296.json
new file mode 100644
index 00000000..8f24ce5b
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/imx296.json
@@ -0,0 +1,443 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 3840
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 7598,
+ "reference_gain": 1.0,
+ "reference_aperture": 1.0,
+ "reference_lux": 800,
+ "reference_Y": 14028
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 0,
+ "reference_slope": 2.671
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 215,
+ "slope": 0.01058
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "priors": [
+ {
+ "lux": 0,
+ "prior":
+ [
+ 2000, 1.0,
+ 3000, 0.0,
+ 13000, 0.0
+ ]
+ },
+ {
+ "lux": 800,
+ "prior":
+ [
+ 2000, 0.0,
+ 6000, 2.0,
+ 13000, 2.0
+ ]
+ },
+ {
+ "lux": 1500,
+ "prior":
+ [
+ 2000, 0.0,
+ 4000, 1.0,
+ 6000, 6.0,
+ 6500, 7.0,
+ 7000, 1.0,
+ 13000, 1.0
+ ]
+ }
+ ],
+ "modes":
+ {
+ "auto":
+ {
+ "lo": 2500,
+ "hi": 7600
+ },
+ "incandescent":
+ {
+ "lo": 2500,
+ "hi": 3000
+ },
+ "tungsten":
+ {
+ "lo": 3000,
+ "hi": 3500
+ },
+ "fluorescent":
+ {
+ "lo": 4000,
+ "hi": 4700
+ },
+ "indoor":
+ {
+ "lo": 3000,
+ "hi": 5000
+ },
+ "daylight":
+ {
+ "lo": 5500,
+ "hi": 6500
+ },
+ "cloudy":
+ {
+ "lo": 7000,
+ "hi": 7600
+ }
+ },
+ "bayes": 1,
+ "ct_curve":
+ [
+ 2500.0, 0.5386, 0.2458,
+ 2800.0, 0.4883, 0.3303,
+ 2900.0, 0.4855, 0.3349,
+ 3620.0, 0.4203, 0.4367,
+ 4560.0, 0.3455, 0.5444,
+ 5600.0, 0.2948, 0.6124,
+ 7400.0, 0.2336, 0.6894
+ ],
+ "sensitivity_r": 1.05,
+ "sensitivity_b": 1.05,
+ "transverse_pos": 0.03093,
+ "transverse_neg": 0.02374
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 30000, 45000, 60000, 120000 ],
+ "gain": [ 1.0, 1.0, 2.0, 4.0, 12.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 30000 ],
+ "gain": [ 1.0, 2.0, 4.0, 8.0, 16.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.5,
+ "calibrations_Cr": [
+ {
+ "ct": 4000,
+ "table":
+ [
+ 2.726, 2.736, 2.737, 2.739, 2.741, 2.741, 2.742, 2.742, 2.743, 2.743, 2.742, 2.742, 2.742, 2.742, 2.741, 2.739,
+ 2.728, 2.736, 2.739, 2.741, 2.742, 2.743, 2.744, 2.745, 2.746, 2.746, 2.745, 2.743, 2.742, 2.742, 2.742, 2.741,
+ 2.729, 2.737, 2.741, 2.744, 2.746, 2.747, 2.748, 2.749, 2.751, 2.751, 2.749, 2.746, 2.744, 2.743, 2.743, 2.743,
+ 2.729, 2.738, 2.743, 2.746, 2.749, 2.749, 2.751, 2.752, 2.753, 2.753, 2.752, 2.751, 2.746, 2.744, 2.744, 2.746,
+ 2.728, 2.737, 2.742, 2.746, 2.749, 2.751, 2.754, 2.755, 2.754, 2.755, 2.754, 2.751, 2.748, 2.746, 2.747, 2.748,
+ 2.724, 2.738, 2.742, 2.746, 2.749, 2.752, 2.755, 2.755, 2.755, 2.755, 2.754, 2.752, 2.749, 2.749, 2.748, 2.748,
+ 2.726, 2.738, 2.741, 2.745, 2.749, 2.753, 2.754, 2.755, 2.755, 2.755, 2.754, 2.753, 2.749, 2.748, 2.748, 2.748,
+ 2.726, 2.738, 2.741, 2.745, 2.746, 2.752, 2.753, 2.753, 2.753, 2.753, 2.754, 2.751, 2.748, 2.748, 2.746, 2.745,
+ 2.726, 2.736, 2.738, 2.742, 2.745, 2.749, 2.752, 2.753, 2.752, 2.752, 2.751, 2.749, 2.747, 2.745, 2.744, 2.742,
+ 2.724, 2.733, 2.736, 2.739, 2.742, 2.745, 2.748, 2.749, 2.749, 2.748, 2.748, 2.747, 2.744, 2.743, 2.742, 2.741,
+ 2.722, 2.726, 2.733, 2.735, 2.737, 2.741, 2.743, 2.744, 2.744, 2.744, 2.744, 2.742, 2.741, 2.741, 2.739, 2.737,
+ 2.719, 2.722, 2.727, 2.729, 2.731, 2.732, 2.734, 2.734, 2.735, 2.735, 2.735, 2.734, 2.733, 2.732, 2.732, 2.732
+ ]
+ },
+ {
+ "ct": 6000,
+ "table":
+ [
+ 3.507, 3.522, 3.525, 3.527, 3.531, 3.533, 3.534, 3.535, 3.535, 3.536, 3.536, 3.537, 3.537, 3.538, 3.537, 3.536,
+ 3.511, 3.524, 3.528, 3.532, 3.533, 3.535, 3.537, 3.538, 3.538, 3.541, 3.539, 3.539, 3.539, 3.539, 3.538, 3.538,
+ 3.513, 3.528, 3.532, 3.535, 3.538, 3.542, 3.543, 3.546, 3.548, 3.551, 3.547, 3.543, 3.541, 3.541, 3.541, 3.541,
+ 3.513, 3.528, 3.533, 3.539, 3.544, 3.546, 3.548, 3.552, 3.553, 3.553, 3.552, 3.548, 3.543, 3.542, 3.542, 3.545,
+ 3.513, 3.528, 3.534, 3.541, 3.547, 3.549, 3.552, 3.553, 3.554, 3.554, 3.553, 3.549, 3.546, 3.544, 3.547, 3.549,
+ 3.508, 3.528, 3.533, 3.541, 3.548, 3.551, 3.553, 3.554, 3.555, 3.555, 3.555, 3.551, 3.548, 3.547, 3.549, 3.551,
+ 3.511, 3.529, 3.534, 3.541, 3.548, 3.551, 3.553, 3.555, 3.555, 3.555, 3.556, 3.554, 3.549, 3.548, 3.548, 3.548,
+ 3.511, 3.528, 3.533, 3.539, 3.546, 3.549, 3.553, 3.554, 3.554, 3.554, 3.554, 3.553, 3.549, 3.547, 3.547, 3.547,
+ 3.511, 3.527, 3.533, 3.536, 3.541, 3.547, 3.551, 3.553, 3.553, 3.552, 3.551, 3.551, 3.548, 3.544, 3.542, 3.543,
+ 3.507, 3.523, 3.528, 3.533, 3.538, 3.541, 3.546, 3.548, 3.549, 3.548, 3.548, 3.546, 3.542, 3.541, 3.541, 3.541,
+ 3.505, 3.514, 3.523, 3.527, 3.532, 3.537, 3.538, 3.544, 3.544, 3.544, 3.542, 3.541, 3.537, 3.537, 3.536, 3.535,
+ 3.503, 3.508, 3.515, 3.519, 3.521, 3.523, 3.524, 3.525, 3.526, 3.526, 3.527, 3.526, 3.524, 3.526, 3.527, 3.527
+ ]
+ }
+ ],
+ "calibrations_Cb": [
+ {
+ "ct": 4000,
+ "table":
+ [
+ 2.032, 2.037, 2.039, 2.041, 2.041, 2.042, 2.043, 2.044, 2.045, 2.045, 2.044, 2.043, 2.042, 2.041, 2.041, 2.034,
+ 2.032, 2.036, 2.039, 2.041, 2.042, 2.042, 2.043, 2.044, 2.045, 2.046, 2.045, 2.044, 2.042, 2.041, 2.039, 2.035,
+ 2.032, 2.036, 2.038, 2.041, 2.043, 2.044, 2.044, 2.045, 2.046, 2.047, 2.047, 2.045, 2.043, 2.042, 2.041, 2.037,
+ 2.032, 2.035, 2.039, 2.042, 2.043, 2.044, 2.045, 2.046, 2.048, 2.048, 2.047, 2.046, 2.045, 2.044, 2.042, 2.039,
+ 2.031, 2.034, 2.037, 2.039, 2.043, 2.045, 2.045, 2.046, 2.047, 2.047, 2.047, 2.046, 2.045, 2.044, 2.043, 2.039,
+ 2.029, 2.033, 2.036, 2.039, 2.042, 2.043, 2.045, 2.046, 2.046, 2.046, 2.046, 2.046, 2.046, 2.045, 2.044, 2.041,
+ 2.028, 2.032, 2.035, 2.039, 2.041, 2.043, 2.044, 2.045, 2.045, 2.046, 2.046, 2.046, 2.046, 2.045, 2.044, 2.039,
+ 2.027, 2.032, 2.035, 2.038, 2.039, 2.041, 2.044, 2.044, 2.044, 2.045, 2.046, 2.046, 2.046, 2.045, 2.044, 2.039,
+ 2.027, 2.031, 2.034, 2.035, 2.037, 2.039, 2.042, 2.043, 2.044, 2.045, 2.045, 2.046, 2.045, 2.044, 2.043, 2.038,
+ 2.025, 2.028, 2.032, 2.034, 2.036, 2.037, 2.041, 2.042, 2.043, 2.044, 2.044, 2.044, 2.044, 2.043, 2.041, 2.036,
+ 2.024, 2.026, 2.029, 2.032, 2.034, 2.036, 2.038, 2.041, 2.041, 2.042, 2.043, 2.042, 2.041, 2.041, 2.037, 2.036,
+ 2.022, 2.024, 2.027, 2.029, 2.032, 2.034, 2.036, 2.039, 2.039, 2.039, 2.041, 2.039, 2.039, 2.038, 2.036, 2.034
+ ]
+ },
+ {
+ "ct": 6000,
+ "table":
+ [
+ 1.585, 1.587, 1.589, 1.589, 1.589, 1.591, 1.591, 1.591, 1.591, 1.591, 1.589, 1.589, 1.588, 1.588, 1.587, 1.581,
+ 1.585, 1.587, 1.588, 1.589, 1.591, 1.591, 1.591, 1.591, 1.591, 1.591, 1.591, 1.589, 1.588, 1.588, 1.587, 1.582,
+ 1.585, 1.586, 1.588, 1.589, 1.591, 1.591, 1.591, 1.591, 1.592, 1.592, 1.591, 1.591, 1.589, 1.588, 1.587, 1.584,
+ 1.585, 1.586, 1.588, 1.589, 1.591, 1.592, 1.592, 1.592, 1.593, 1.593, 1.592, 1.591, 1.589, 1.589, 1.588, 1.586,
+ 1.584, 1.586, 1.587, 1.589, 1.591, 1.591, 1.592, 1.592, 1.592, 1.592, 1.591, 1.591, 1.591, 1.589, 1.589, 1.586,
+ 1.583, 1.585, 1.587, 1.588, 1.589, 1.591, 1.591, 1.592, 1.592, 1.591, 1.591, 1.591, 1.591, 1.591, 1.589, 1.586,
+ 1.583, 1.584, 1.586, 1.588, 1.589, 1.589, 1.591, 1.591, 1.591, 1.591, 1.591, 1.591, 1.591, 1.591, 1.589, 1.585,
+ 1.581, 1.584, 1.586, 1.587, 1.588, 1.588, 1.589, 1.591, 1.591, 1.591, 1.591, 1.591, 1.591, 1.589, 1.589, 1.585,
+ 1.581, 1.583, 1.584, 1.586, 1.587, 1.588, 1.589, 1.589, 1.591, 1.591, 1.591, 1.591, 1.591, 1.589, 1.589, 1.585,
+ 1.579, 1.581, 1.583, 1.584, 1.586, 1.586, 1.588, 1.589, 1.589, 1.589, 1.589, 1.589, 1.589, 1.589, 1.587, 1.584,
+ 1.578, 1.579, 1.581, 1.583, 1.584, 1.585, 1.586, 1.587, 1.588, 1.588, 1.588, 1.588, 1.588, 1.587, 1.585, 1.583,
+ 1.577, 1.578, 1.579, 1.582, 1.583, 1.584, 1.585, 1.586, 1.586, 1.587, 1.587, 1.587, 1.586, 1.586, 1.584, 1.583
+ ]
+ }
+ ],
+ "luminance_lut":
+ [
+ 1.112, 1.098, 1.078, 1.062, 1.049, 1.039, 1.031, 1.027, 1.026, 1.027, 1.034, 1.043, 1.054, 1.069, 1.087, 1.096,
+ 1.106, 1.091, 1.073, 1.056, 1.042, 1.032, 1.025, 1.021, 1.021, 1.022, 1.027, 1.036, 1.047, 1.061, 1.077, 1.088,
+ 1.101, 1.085, 1.066, 1.049, 1.035, 1.026, 1.019, 1.013, 1.013, 1.015, 1.021, 1.028, 1.039, 1.052, 1.069, 1.083,
+ 1.098, 1.081, 1.059, 1.045, 1.031, 1.021, 1.013, 1.007, 1.007, 1.009, 1.014, 1.021, 1.033, 1.046, 1.063, 1.081,
+ 1.097, 1.076, 1.057, 1.041, 1.027, 1.016, 1.007, 1.004, 1.002, 1.005, 1.009, 1.017, 1.028, 1.043, 1.061, 1.077,
+ 1.096, 1.075, 1.054, 1.039, 1.025, 1.014, 1.005, 1.001, 1.001, 1.002, 1.006, 1.015, 1.027, 1.041, 1.058, 1.076,
+ 1.096, 1.074, 1.054, 1.039, 1.025, 1.013, 1.005, 1.001, 1.001, 1.001, 1.006, 1.015, 1.026, 1.041, 1.058, 1.076,
+ 1.096, 1.075, 1.056, 1.041, 1.026, 1.014, 1.007, 1.003, 1.002, 1.004, 1.008, 1.016, 1.028, 1.041, 1.059, 1.076,
+ 1.096, 1.079, 1.059, 1.044, 1.029, 1.018, 1.011, 1.007, 1.005, 1.008, 1.012, 1.019, 1.031, 1.044, 1.061, 1.077,
+ 1.101, 1.084, 1.065, 1.049, 1.035, 1.024, 1.017, 1.011, 1.011, 1.012, 1.018, 1.025, 1.036, 1.051, 1.068, 1.081,
+ 1.106, 1.092, 1.072, 1.055, 1.042, 1.033, 1.024, 1.019, 1.018, 1.019, 1.025, 1.032, 1.044, 1.058, 1.076, 1.088,
+ 1.113, 1.097, 1.079, 1.063, 1.049, 1.039, 1.031, 1.025, 1.025, 1.025, 1.031, 1.039, 1.051, 1.065, 1.083, 1.094
+ ],
+ "sigma": 0.00047,
+ "sigma_Cb": 0.00056
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 2500,
+ "ccm":
+ [
+ 1.95054, -0.57435, -0.37619,
+ -0.46945, 1.86661, -0.39716,
+ 0.07977, -1.14072, 2.06095
+ ]
+ },
+ {
+ "ct": 2800,
+ "ccm":
+ [
+ 1.94104, -0.60261, -0.33844,
+ -0.43162, 1.85422, -0.42261,
+ 0.03799, -0.95022, 1.91222
+ ]
+ },
+ {
+ "ct": 2900,
+ "ccm":
+ [
+ 1.91828, -0.59569, -0.32258,
+ -0.51902, 2.09091, -0.57189,
+ -0.03324, -0.73462, 1.76785
+ ]
+ },
+ {
+ "ct": 3620,
+ "ccm":
+ [
+ 1.97199, -0.66403, -0.30797,
+ -0.46411, 2.02612, -0.56201,
+ -0.07764, -0.61178, 1.68942
+ ]
+ },
+ {
+ "ct": 4560,
+ "ccm":
+ [
+ 2.15256, -0.84787, -0.30469,
+ -0.48422, 2.28962, -0.80541,
+ -0.15113, -0.53014, 1.68127
+ ]
+ },
+ {
+ "ct": 5600,
+ "ccm":
+ [
+ 2.04576, -0.74771, -0.29805,
+ -0.36332, 1.98993, -0.62662,
+ -0.09328, -0.46543, 1.55871
+ ]
+ },
+ {
+ "ct": 7400,
+ "ccm":
+ [
+ 2.37532, -0.83069, -0.54462,
+ -0.48279, 2.84309, -1.36031,
+ -0.21178, -0.66532, 1.87709
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.sharpen":
+ {
+ "threshold": 0.1,
+ "strength": 1.0,
+ "limit": 0.18
+ }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/src/ipa/rpi/vc4/data/imx296_mono.json b/src/ipa/rpi/vc4/data/imx296_mono.json
new file mode 100644
index 00000000..fe331569
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/imx296_mono.json
@@ -0,0 +1,240 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 3840
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 9998,
+ "reference_gain": 1.0,
+ "reference_aperture": 1.0,
+ "reference_lux": 823,
+ "reference_Y": 12396
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 0,
+ "reference_slope": 2.753
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.agc":
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 0,
+ "luminance_strength": 0.5,
+ "calibrations_Cr": [
+ {
+ "ct": 4000,
+ "table":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ ],
+ "calibrations_Cb": [
+ {
+ "ct": 4000,
+ "table":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ ],
+ "luminance_lut":
+ [
+ 1.308, 1.293, 1.228, 1.175, 1.139, 1.108, 1.092, 1.082, 1.082, 1.086, 1.097, 1.114, 1.149, 1.199, 1.279, 1.303,
+ 1.293, 1.249, 1.199, 1.162, 1.136, 1.109, 1.087, 1.077, 1.072, 1.081, 1.095, 1.103, 1.133, 1.172, 1.225, 1.282,
+ 1.251, 1.212, 1.186, 1.159, 1.129, 1.114, 1.102, 1.088, 1.088, 1.088, 1.095, 1.117, 1.123, 1.158, 1.198, 1.249,
+ 1.223, 1.192, 1.177, 1.163, 1.147, 1.139, 1.132, 1.112, 1.111, 1.107, 1.113, 1.118, 1.139, 1.155, 1.186, 1.232,
+ 1.207, 1.186, 1.171, 1.162, 1.168, 1.163, 1.153, 1.138, 1.129, 1.128, 1.132, 1.136, 1.149, 1.167, 1.189, 1.216,
+ 1.198, 1.186, 1.176, 1.176, 1.177, 1.185, 1.171, 1.157, 1.146, 1.144, 1.146, 1.149, 1.161, 1.181, 1.201, 1.221,
+ 1.203, 1.181, 1.176, 1.178, 1.191, 1.189, 1.188, 1.174, 1.159, 1.153, 1.158, 1.161, 1.169, 1.185, 1.211, 1.227,
+ 1.211, 1.179, 1.177, 1.187, 1.194, 1.196, 1.194, 1.187, 1.176, 1.169, 1.171, 1.171, 1.175, 1.189, 1.214, 1.226,
+ 1.219, 1.182, 1.184, 1.191, 1.195, 1.199, 1.197, 1.194, 1.188, 1.185, 1.179, 1.179, 1.182, 1.194, 1.212, 1.227,
+ 1.237, 1.192, 1.194, 1.194, 1.198, 1.199, 1.198, 1.197, 1.196, 1.193, 1.189, 1.189, 1.192, 1.203, 1.214, 1.231,
+ 1.282, 1.199, 1.199, 1.197, 1.199, 1.199, 1.192, 1.193, 1.193, 1.194, 1.196, 1.197, 1.206, 1.216, 1.228, 1.244,
+ 1.309, 1.236, 1.204, 1.203, 1.202, 1.194, 1.194, 1.188, 1.192, 1.192, 1.199, 1.201, 1.212, 1.221, 1.235, 1.247
+ ],
+ "sigma": 0.005,
+ "sigma_Cb": 0.005
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.sharpen":
+ {
+ "threshold": 0.1,
+ "strength": 1.0,
+ "limit": 0.18
+ }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/src/ipa/rpi/vc4/data/imx378.json b/src/ipa/rpi/vc4/data/imx378.json
new file mode 100644
index 00000000..363b47e1
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/imx378.json
@@ -0,0 +1,427 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 4096
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 9999,
+ "reference_gain": 1.95,
+ "reference_aperture": 1.0,
+ "reference_lux": 1000,
+ "reference_Y": 12996
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 0,
+ "reference_slope": 2.641
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 235,
+ "slope": 0.00902
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "priors": [
+ {
+ "lux": 0,
+ "prior":
+ [
+ 2000, 1.0,
+ 3000, 0.0,
+ 13000, 0.0
+ ]
+ },
+ {
+ "lux": 800,
+ "prior":
+ [
+ 2000, 0.0,
+ 6000, 2.0,
+ 13000, 2.0
+ ]
+ },
+ {
+ "lux": 1500,
+ "prior":
+ [
+ 2000, 0.0,
+ 4000, 1.0,
+ 6000, 6.0,
+ 6500, 7.0,
+ 7000, 1.0,
+ 13000, 1.0
+ ]
+ }
+ ],
+ "modes":
+ {
+ "auto":
+ {
+ "lo": 2500,
+ "hi": 8000
+ },
+ "incandescent":
+ {
+ "lo": 2500,
+ "hi": 3000
+ },
+ "tungsten":
+ {
+ "lo": 3000,
+ "hi": 3500
+ },
+ "fluorescent":
+ {
+ "lo": 4000,
+ "hi": 4700
+ },
+ "indoor":
+ {
+ "lo": 3000,
+ "hi": 5000
+ },
+ "daylight":
+ {
+ "lo": 5500,
+ "hi": 6500
+ },
+ "cloudy":
+ {
+ "lo": 7000,
+ "hi": 8100
+ }
+ },
+ "bayes": 1,
+ "ct_curve":
+ [
+ 2850.0, 0.6361, 0.3911,
+ 3550.0, 0.5386, 0.5077,
+ 4500.0, 0.4472, 0.6171,
+ 5600.0, 0.3906, 0.6848,
+ 8000.0, 0.3412, 0.7441
+ ],
+ "sensitivity_r": 1.0,
+ "sensitivity_b": 1.0,
+ "transverse_pos": 0.01667,
+ "transverse_neg": 0.01195
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.5,
+ "calibrations_Cr": [
+ {
+ "ct": 2800,
+ "table":
+ [
+ 1.604, 1.601, 1.593, 1.581, 1.568, 1.561, 1.561, 1.561, 1.561, 1.567, 1.582, 1.596, 1.609, 1.622, 1.632, 1.636,
+ 1.601, 1.594, 1.586, 1.571, 1.555, 1.546, 1.543, 1.543, 1.547, 1.555, 1.572, 1.584, 1.599, 1.614, 1.625, 1.632,
+ 1.599, 1.586, 1.571, 1.555, 1.542, 1.528, 1.518, 1.518, 1.523, 1.537, 1.555, 1.572, 1.589, 1.607, 1.622, 1.629,
+ 1.597, 1.579, 1.561, 1.542, 1.528, 1.512, 1.493, 1.493, 1.499, 1.523, 1.537, 1.563, 1.582, 1.601, 1.619, 1.629,
+ 1.597, 1.577, 1.557, 1.535, 1.512, 1.493, 1.481, 1.479, 1.492, 1.499, 1.524, 1.555, 1.578, 1.599, 1.619, 1.629,
+ 1.597, 1.577, 1.557, 1.534, 1.508, 1.483, 1.476, 1.476, 1.481, 1.496, 1.522, 1.554, 1.578, 1.599, 1.619, 1.629,
+ 1.597, 1.578, 1.557, 1.534, 1.508, 1.483, 1.481, 1.479, 1.481, 1.496, 1.522, 1.554, 1.579, 1.601, 1.619, 1.631,
+ 1.597, 1.581, 1.562, 1.539, 1.517, 1.504, 1.483, 1.481, 1.496, 1.511, 1.531, 1.561, 1.585, 1.607, 1.623, 1.632,
+ 1.601, 1.589, 1.569, 1.554, 1.539, 1.517, 1.504, 1.504, 1.511, 1.531, 1.553, 1.573, 1.596, 1.614, 1.629, 1.636,
+ 1.609, 1.601, 1.586, 1.569, 1.554, 1.542, 1.535, 1.535, 1.541, 1.553, 1.573, 1.592, 1.608, 1.625, 1.637, 1.645,
+ 1.617, 1.611, 1.601, 1.586, 1.574, 1.565, 1.564, 1.564, 1.571, 1.579, 1.592, 1.608, 1.622, 1.637, 1.646, 1.654,
+ 1.619, 1.617, 1.611, 1.601, 1.588, 1.585, 1.585, 1.585, 1.588, 1.592, 1.607, 1.622, 1.637, 1.645, 1.654, 1.655
+ ]
+ },
+ {
+ "ct": 5500,
+ "table":
+ [
+ 2.664, 2.658, 2.645, 2.629, 2.602, 2.602, 2.602, 2.606, 2.617, 2.628, 2.649, 2.677, 2.699, 2.722, 2.736, 2.747,
+ 2.658, 2.653, 2.629, 2.605, 2.576, 2.575, 2.577, 2.592, 2.606, 2.618, 2.629, 2.651, 2.678, 2.707, 2.727, 2.741,
+ 2.649, 2.631, 2.605, 2.576, 2.563, 2.552, 2.552, 2.557, 2.577, 2.604, 2.619, 2.641, 2.669, 2.698, 2.721, 2.741,
+ 2.643, 2.613, 2.583, 2.563, 2.552, 2.531, 2.527, 2.527, 2.551, 2.577, 2.604, 2.638, 2.665, 2.694, 2.721, 2.741,
+ 2.643, 2.606, 2.575, 2.558, 2.531, 2.516, 2.504, 2.516, 2.527, 2.551, 2.596, 2.635, 2.665, 2.694, 2.721, 2.741,
+ 2.643, 2.606, 2.575, 2.558, 2.531, 2.503, 2.501, 2.502, 2.522, 2.551, 2.592, 2.635, 2.669, 2.696, 2.727, 2.744,
+ 2.648, 2.611, 2.579, 2.558, 2.532, 2.511, 2.502, 2.511, 2.522, 2.552, 2.592, 2.642, 2.673, 2.702, 2.731, 2.752,
+ 2.648, 2.619, 2.589, 2.571, 2.556, 2.532, 2.519, 2.522, 2.552, 2.568, 2.605, 2.648, 2.683, 2.715, 2.743, 2.758,
+ 2.659, 2.637, 2.613, 2.589, 2.571, 2.556, 2.555, 2.555, 2.568, 2.605, 2.641, 2.671, 2.699, 2.729, 2.758, 2.776,
+ 2.679, 2.665, 2.637, 2.613, 2.602, 2.599, 2.599, 2.606, 2.619, 2.641, 2.671, 2.698, 2.723, 2.754, 2.776, 2.787,
+ 2.695, 2.684, 2.671, 2.646, 2.636, 2.636, 2.641, 2.648, 2.661, 2.681, 2.698, 2.723, 2.751, 2.776, 2.788, 2.803,
+ 2.702, 2.699, 2.684, 2.671, 2.664, 2.664, 2.664, 2.668, 2.681, 2.698, 2.723, 2.751, 2.773, 2.788, 2.803, 2.805
+ ]
+ }
+ ],
+ "calibrations_Cb": [
+ {
+ "ct": 2800,
+ "table":
+ [
+ 2.876, 2.868, 2.863, 2.851, 2.846, 2.846, 2.847, 2.851, 2.851, 2.857, 2.867, 2.875, 2.889, 2.899, 2.913, 2.926,
+ 2.863, 2.861, 2.856, 2.846, 2.846, 2.847, 2.848, 2.851, 2.857, 2.859, 2.875, 2.882, 2.886, 2.896, 2.909, 2.917,
+ 2.861, 2.856, 2.846, 2.841, 2.841, 2.855, 2.867, 2.875, 2.888, 2.888, 2.885, 2.883, 2.886, 2.889, 2.901, 2.913,
+ 2.858, 2.851, 2.846, 2.846, 2.855, 2.867, 2.884, 2.895, 2.902, 2.902, 2.901, 2.891, 2.891, 2.894, 2.901, 2.909,
+ 2.858, 2.851, 2.846, 2.846, 2.867, 2.884, 2.895, 2.902, 2.909, 2.915, 2.911, 2.901, 2.895, 2.898, 2.904, 2.909,
+ 2.858, 2.851, 2.849, 2.853, 2.874, 2.888, 2.901, 2.909, 2.917, 2.922, 2.917, 2.911, 2.901, 2.899, 2.905, 2.908,
+ 2.861, 2.855, 2.853, 2.855, 2.874, 2.888, 2.901, 2.913, 2.918, 2.922, 2.921, 2.911, 2.901, 2.901, 2.907, 2.908,
+ 2.862, 2.859, 2.855, 2.856, 2.872, 2.885, 2.899, 2.906, 2.915, 2.917, 2.911, 2.907, 2.907, 2.907, 2.908, 2.909,
+ 2.863, 2.863, 2.859, 2.864, 2.871, 2.881, 2.885, 2.899, 2.905, 2.905, 2.904, 2.904, 2.907, 2.909, 2.913, 2.913,
+ 2.866, 2.865, 2.865, 2.867, 2.868, 2.872, 2.881, 2.885, 2.889, 2.894, 2.895, 2.902, 2.906, 2.913, 2.914, 2.917,
+ 2.875, 2.875, 2.871, 2.871, 2.871, 2.871, 2.869, 2.869, 2.878, 2.889, 2.894, 2.895, 2.906, 2.914, 2.917, 2.921,
+ 2.882, 2.879, 2.876, 2.874, 2.871, 2.871, 2.869, 2.869, 2.869, 2.878, 2.891, 2.894, 2.905, 2.914, 2.919, 2.921
+ ]
+ },
+ {
+ "ct": 5500,
+ "table":
+ [
+ 1.488, 1.488, 1.488, 1.488, 1.491, 1.492, 1.492, 1.491, 1.491, 1.491, 1.492, 1.495, 1.497, 1.499, 1.499, 1.503,
+ 1.482, 1.485, 1.485, 1.487, 1.489, 1.492, 1.492, 1.492, 1.492, 1.492, 1.494, 1.494, 1.492, 1.491, 1.493, 1.494,
+ 1.482, 1.482, 1.484, 1.485, 1.487, 1.492, 1.496, 1.498, 1.499, 1.498, 1.494, 1.492, 1.491, 1.491, 1.491, 1.491,
+ 1.481, 1.481, 1.482, 1.485, 1.491, 1.496, 1.498, 1.499, 1.501, 1.499, 1.498, 1.493, 1.491, 1.488, 1.488, 1.488,
+ 1.481, 1.481, 1.481, 1.483, 1.491, 1.497, 1.498, 1.499, 1.501, 1.499, 1.498, 1.492, 1.488, 1.485, 1.483, 1.483,
+ 1.479, 1.479, 1.481, 1.482, 1.489, 1.495, 1.497, 1.498, 1.499, 1.499, 1.495, 1.492, 1.485, 1.482, 1.482, 1.481,
+ 1.479, 1.479, 1.479, 1.481, 1.489, 1.494, 1.496, 1.497, 1.497, 1.496, 1.495, 1.489, 1.482, 1.481, 1.479, 1.477,
+ 1.478, 1.478, 1.479, 1.481, 1.487, 1.491, 1.494, 1.496, 1.496, 1.495, 1.492, 1.487, 1.482, 1.479, 1.478, 1.476,
+ 1.478, 1.478, 1.479, 1.482, 1.486, 1.488, 1.491, 1.493, 1.493, 1.492, 1.487, 1.484, 1.481, 1.479, 1.476, 1.476,
+ 1.477, 1.479, 1.481, 1.483, 1.485, 1.486, 1.488, 1.488, 1.487, 1.487, 1.484, 1.483, 1.481, 1.479, 1.476, 1.476,
+ 1.477, 1.479, 1.482, 1.483, 1.484, 1.485, 1.484, 1.482, 1.482, 1.484, 1.483, 1.482, 1.481, 1.479, 1.477, 1.476,
+ 1.477, 1.479, 1.482, 1.483, 1.484, 1.484, 1.482, 1.482, 1.482, 1.482, 1.482, 1.481, 1.479, 1.479, 1.479, 1.479
+ ]
+ }
+ ],
+ "luminance_lut":
+ [
+ 2.764, 2.654, 2.321, 2.043, 1.768, 1.594, 1.558, 1.558, 1.558, 1.568, 1.661, 1.904, 2.193, 2.497, 2.888, 3.043,
+ 2.654, 2.373, 2.049, 1.819, 1.569, 1.446, 1.381, 1.356, 1.356, 1.403, 1.501, 1.679, 1.939, 2.218, 2.586, 2.888,
+ 2.376, 2.154, 1.819, 1.569, 1.438, 1.301, 1.246, 1.224, 1.224, 1.263, 1.349, 1.501, 1.679, 1.985, 2.359, 2.609,
+ 2.267, 1.987, 1.662, 1.438, 1.301, 1.235, 1.132, 1.105, 1.105, 1.164, 1.263, 1.349, 1.528, 1.808, 2.184, 2.491,
+ 2.218, 1.876, 1.568, 1.367, 1.235, 1.132, 1.087, 1.022, 1.023, 1.104, 1.164, 1.278, 1.439, 1.695, 2.066, 2.429,
+ 2.218, 1.832, 1.533, 1.341, 1.206, 1.089, 1.013, 1.002, 1.013, 1.026, 1.122, 1.246, 1.399, 1.642, 2.004, 2.426,
+ 2.218, 1.832, 1.533, 1.341, 1.206, 1.089, 1.011, 1.001, 1.009, 1.026, 1.122, 1.246, 1.399, 1.642, 2.004, 2.426,
+ 2.224, 1.896, 1.584, 1.382, 1.248, 1.147, 1.088, 1.016, 1.026, 1.118, 1.168, 1.283, 1.444, 1.697, 2.066, 2.428,
+ 2.292, 2.019, 1.689, 1.462, 1.322, 1.247, 1.147, 1.118, 1.118, 1.168, 1.275, 1.358, 1.532, 1.809, 2.189, 2.491,
+ 2.444, 2.204, 1.856, 1.606, 1.462, 1.322, 1.257, 1.234, 1.234, 1.275, 1.358, 1.516, 1.686, 1.993, 2.371, 2.622,
+ 2.748, 2.444, 2.108, 1.856, 1.606, 1.476, 1.399, 1.376, 1.376, 1.422, 1.516, 1.686, 1.968, 2.238, 2.611, 2.935,
+ 2.862, 2.748, 2.395, 2.099, 1.811, 1.621, 1.582, 1.582, 1.582, 1.592, 1.677, 1.919, 2.223, 2.534, 2.935, 3.078
+ ],
+ "sigma": 0.00428,
+ "sigma_Cb": 0.00363
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 2850,
+ "ccm":
+ [
+ 1.42601, -0.20537, -0.22063,
+ -0.47682, 1.81987, -0.34305,
+ 0.01854, -0.86036, 1.84181
+ ]
+ },
+ {
+ "ct": 2900,
+ "ccm":
+ [
+ 1.29755, 0.04602, -0.34356,
+ -0.41491, 1.73477, -0.31987,
+ -0.01345, -0.97115, 1.98459
+ ]
+ },
+ {
+ "ct": 3550,
+ "ccm":
+ [
+ 1.49811, -0.33412, -0.16398,
+ -0.40869, 1.72995, -0.32127,
+ -0.01924, -0.62181, 1.64105
+ ]
+ },
+ {
+ "ct": 4500,
+ "ccm":
+ [
+ 1.47015, -0.29229, -0.17786,
+ -0.36561, 1.88919, -0.52358,
+ -0.03552, -0.56717, 1.60269
+ ]
+ },
+ {
+ "ct": 5600,
+ "ccm":
+ [
+ 1.60962, -0.47434, -0.13528,
+ -0.32701, 1.73797, -0.41096,
+ -0.07626, -0.40171, 1.47796
+ ]
+ },
+ {
+ "ct": 8000,
+ "ccm":
+ [
+ 1.54642, -0.20396, -0.34246,
+ -0.31748, 2.22559, -0.90811,
+ -0.10035, -0.65877, 1.75912
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.sharpen": { }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/src/ipa/rpi/vc4/data/imx477.json b/src/ipa/rpi/vc4/data/imx477.json
new file mode 100644
index 00000000..fa25ee86
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/imx477.json
@@ -0,0 +1,700 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 4096
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 27242,
+ "reference_gain": 1.0,
+ "reference_aperture": 1.0,
+ "reference_lux": 830,
+ "reference_Y": 17755
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 0,
+ "reference_slope": 2.767
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 204,
+ "slope": 0.01078
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "priors": [
+ {
+ "lux": 0,
+ "prior":
+ [
+ 2000, 1.0,
+ 3000, 0.0,
+ 13000, 0.0
+ ]
+ },
+ {
+ "lux": 800,
+ "prior":
+ [
+ 2000, 0.0,
+ 6000, 2.0,
+ 13000, 2.0
+ ]
+ },
+ {
+ "lux": 1500,
+ "prior":
+ [
+ 2000, 0.0,
+ 4000, 1.0,
+ 6000, 6.0,
+ 6500, 7.0,
+ 7000, 1.0,
+ 13000, 1.0
+ ]
+ }
+ ],
+ "modes":
+ {
+ "auto":
+ {
+ "lo": 2500,
+ "hi": 8000
+ },
+ "incandescent":
+ {
+ "lo": 2500,
+ "hi": 3000
+ },
+ "tungsten":
+ {
+ "lo": 3000,
+ "hi": 3500
+ },
+ "fluorescent":
+ {
+ "lo": 4000,
+ "hi": 4700
+ },
+ "indoor":
+ {
+ "lo": 3000,
+ "hi": 5000
+ },
+ "daylight":
+ {
+ "lo": 5500,
+ "hi": 6500
+ },
+ "cloudy":
+ {
+ "lo": 7000,
+ "hi": 8600
+ }
+ },
+ "bayes": 1,
+ "ct_curve":
+ [
+ 2360.0, 0.6009, 0.3093,
+ 2848.0, 0.5071, 0.4,
+ 2903.0, 0.4905, 0.4392,
+ 3628.0, 0.4261, 0.5564,
+ 3643.0, 0.4228, 0.5623,
+ 4660.0, 0.3529, 0.68,
+ 5579.0, 0.3227, 0.7,
+ 6125.0, 0.3129, 0.71,
+ 6671.0, 0.3065, 0.72,
+ 7217.0, 0.3014, 0.73,
+ 7763.0, 0.295, 0.74,
+ 9505.0, 0.2524, 0.7856
+ ],
+ "sensitivity_r": 1.05,
+ "sensitivity_b": 1.05,
+ "transverse_pos": 0.0238,
+ "transverse_neg": 0.04429
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "channels": [
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.3,
+ 1000, 0.3
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.3,
+ 1000, 0.3
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ },
+ {
+ "base_ev": 0.125,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.3,
+ 1000, 0.3
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.3,
+ 1000, 0.3
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ },
+ {
+ "base_ev": 1.5,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.3,
+ 1000, 0.3
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.3,
+ 1000, 0.3
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.5,
+ "calibrations_Cr": [
+ {
+ "ct": 2960,
+ "table":
+ [
+ 2.088, 2.086, 2.082, 2.081, 2.077, 2.071, 2.068, 2.068, 2.072, 2.073, 2.075, 2.078, 2.084, 2.092, 2.095, 2.098,
+ 2.086, 2.084, 2.079, 2.078, 2.075, 2.068, 2.064, 2.063, 2.068, 2.071, 2.072, 2.075, 2.081, 2.089, 2.092, 2.094,
+ 2.083, 2.081, 2.077, 2.072, 2.069, 2.062, 2.059, 2.059, 2.063, 2.067, 2.069, 2.072, 2.079, 2.088, 2.089, 2.089,
+ 2.081, 2.077, 2.072, 2.068, 2.065, 2.058, 2.055, 2.054, 2.057, 2.062, 2.066, 2.069, 2.077, 2.084, 2.086, 2.086,
+ 2.078, 2.075, 2.069, 2.065, 2.061, 2.055, 2.052, 2.049, 2.051, 2.056, 2.062, 2.065, 2.072, 2.079, 2.081, 2.079,
+ 2.079, 2.075, 2.069, 2.064, 2.061, 2.053, 2.049, 2.046, 2.049, 2.051, 2.057, 2.062, 2.069, 2.075, 2.077, 2.075,
+ 2.082, 2.079, 2.072, 2.065, 2.061, 2.054, 2.049, 2.047, 2.049, 2.051, 2.056, 2.061, 2.066, 2.073, 2.073, 2.069,
+ 2.086, 2.082, 2.075, 2.068, 2.062, 2.054, 2.051, 2.049, 2.051, 2.052, 2.056, 2.061, 2.066, 2.073, 2.073, 2.072,
+ 2.088, 2.086, 2.079, 2.074, 2.066, 2.057, 2.051, 2.051, 2.054, 2.055, 2.056, 2.061, 2.067, 2.072, 2.073, 2.072,
+ 2.091, 2.087, 2.079, 2.075, 2.068, 2.057, 2.052, 2.052, 2.056, 2.055, 2.055, 2.059, 2.066, 2.072, 2.072, 2.072,
+ 2.093, 2.088, 2.081, 2.077, 2.069, 2.059, 2.054, 2.054, 2.057, 2.056, 2.056, 2.058, 2.066, 2.072, 2.073, 2.073,
+ 2.095, 2.091, 2.084, 2.078, 2.075, 2.067, 2.057, 2.057, 2.059, 2.059, 2.058, 2.059, 2.068, 2.073, 2.075, 2.078
+ ]
+ },
+ {
+ "ct": 4850,
+ "table":
+ [
+ 2.973, 2.968, 2.956, 2.943, 2.941, 2.932, 2.923, 2.921, 2.924, 2.929, 2.931, 2.939, 2.953, 2.965, 2.966, 2.976,
+ 2.969, 2.962, 2.951, 2.941, 2.934, 2.928, 2.919, 2.918, 2.919, 2.923, 2.927, 2.933, 2.945, 2.957, 2.962, 2.962,
+ 2.964, 2.956, 2.944, 2.932, 2.929, 2.924, 2.915, 2.914, 2.915, 2.919, 2.924, 2.928, 2.941, 2.952, 2.958, 2.959,
+ 2.957, 2.951, 2.939, 2.928, 2.924, 2.919, 2.913, 2.911, 2.911, 2.915, 2.919, 2.925, 2.936, 2.947, 2.952, 2.953,
+ 2.954, 2.947, 2.935, 2.924, 2.919, 2.915, 2.908, 2.906, 2.906, 2.907, 2.914, 2.921, 2.932, 2.941, 2.943, 2.942,
+ 2.953, 2.946, 2.932, 2.921, 2.916, 2.911, 2.904, 2.902, 2.901, 2.904, 2.909, 2.919, 2.926, 2.937, 2.939, 2.939,
+ 2.953, 2.947, 2.932, 2.918, 2.915, 2.909, 2.903, 2.901, 2.901, 2.906, 2.911, 2.918, 2.924, 2.936, 2.936, 2.932,
+ 2.956, 2.948, 2.934, 2.919, 2.916, 2.908, 2.903, 2.901, 2.902, 2.907, 2.909, 2.917, 2.926, 2.936, 2.939, 2.939,
+ 2.957, 2.951, 2.936, 2.923, 2.917, 2.907, 2.904, 2.901, 2.902, 2.908, 2.911, 2.919, 2.929, 2.939, 2.942, 2.942,
+ 2.961, 2.951, 2.936, 2.922, 2.918, 2.906, 2.904, 2.901, 2.901, 2.907, 2.911, 2.921, 2.931, 2.941, 2.942, 2.944,
+ 2.964, 2.954, 2.936, 2.924, 2.918, 2.909, 2.905, 2.905, 2.905, 2.907, 2.912, 2.923, 2.933, 2.942, 2.944, 2.944,
+ 2.964, 2.958, 2.943, 2.927, 2.921, 2.914, 2.909, 2.907, 2.907, 2.912, 2.916, 2.928, 2.936, 2.944, 2.947, 2.952
+ ]
+ },
+ {
+ "ct": 5930,
+ "table":
+ [
+ 3.312, 3.308, 3.301, 3.294, 3.288, 3.277, 3.268, 3.261, 3.259, 3.261, 3.267, 3.273, 3.285, 3.301, 3.303, 3.312,
+ 3.308, 3.304, 3.294, 3.291, 3.283, 3.271, 3.263, 3.259, 3.257, 3.258, 3.261, 3.268, 3.278, 3.293, 3.299, 3.299,
+ 3.302, 3.296, 3.288, 3.282, 3.276, 3.267, 3.259, 3.254, 3.252, 3.253, 3.256, 3.261, 3.273, 3.289, 3.292, 3.292,
+ 3.296, 3.289, 3.282, 3.276, 3.269, 3.263, 3.256, 3.251, 3.248, 3.249, 3.251, 3.257, 3.268, 3.279, 3.284, 3.284,
+ 3.292, 3.285, 3.279, 3.271, 3.264, 3.257, 3.249, 3.243, 3.241, 3.241, 3.246, 3.252, 3.261, 3.274, 3.275, 3.273,
+ 3.291, 3.285, 3.276, 3.268, 3.259, 3.251, 3.242, 3.239, 3.236, 3.238, 3.244, 3.248, 3.258, 3.268, 3.269, 3.265,
+ 3.294, 3.288, 3.275, 3.266, 3.257, 3.248, 3.239, 3.238, 3.237, 3.238, 3.243, 3.246, 3.255, 3.264, 3.264, 3.257,
+ 3.297, 3.293, 3.279, 3.268, 3.258, 3.249, 3.238, 3.237, 3.239, 3.239, 3.243, 3.245, 3.255, 3.264, 3.264, 3.263,
+ 3.301, 3.295, 3.281, 3.271, 3.259, 3.248, 3.237, 3.237, 3.239, 3.241, 3.243, 3.246, 3.257, 3.265, 3.266, 3.264,
+ 3.306, 3.295, 3.279, 3.271, 3.261, 3.247, 3.235, 3.234, 3.239, 3.239, 3.243, 3.247, 3.258, 3.265, 3.265, 3.264,
+ 3.308, 3.297, 3.279, 3.272, 3.261, 3.249, 3.239, 3.239, 3.241, 3.243, 3.245, 3.248, 3.261, 3.265, 3.266, 3.265,
+ 3.309, 3.301, 3.286, 3.276, 3.267, 3.256, 3.246, 3.242, 3.244, 3.244, 3.249, 3.253, 3.263, 3.267, 3.271, 3.274
+ ]
+ }
+ ],
+ "calibrations_Cb": [
+ {
+ "ct": 2960,
+ "table":
+ [
+ 2.133, 2.134, 2.139, 2.143, 2.148, 2.155, 2.158, 2.158, 2.158, 2.161, 2.161, 2.162, 2.159, 2.156, 2.152, 2.151,
+ 2.132, 2.133, 2.135, 2.142, 2.147, 2.153, 2.158, 2.158, 2.158, 2.158, 2.159, 2.159, 2.157, 2.154, 2.151, 2.148,
+ 2.133, 2.133, 2.135, 2.142, 2.149, 2.154, 2.158, 2.158, 2.157, 2.156, 2.158, 2.157, 2.155, 2.153, 2.148, 2.146,
+ 2.133, 2.133, 2.138, 2.145, 2.149, 2.154, 2.158, 2.159, 2.158, 2.155, 2.157, 2.156, 2.153, 2.149, 2.146, 2.144,
+ 2.133, 2.134, 2.139, 2.146, 2.149, 2.154, 2.158, 2.159, 2.159, 2.156, 2.154, 2.154, 2.149, 2.145, 2.143, 2.139,
+ 2.135, 2.135, 2.139, 2.146, 2.151, 2.155, 2.158, 2.159, 2.158, 2.156, 2.153, 2.151, 2.146, 2.143, 2.139, 2.136,
+ 2.135, 2.135, 2.138, 2.145, 2.151, 2.154, 2.157, 2.158, 2.157, 2.156, 2.153, 2.151, 2.147, 2.143, 2.141, 2.137,
+ 2.135, 2.134, 2.135, 2.141, 2.149, 2.154, 2.157, 2.157, 2.157, 2.157, 2.157, 2.153, 2.149, 2.146, 2.142, 2.139,
+ 2.132, 2.133, 2.135, 2.139, 2.148, 2.153, 2.158, 2.159, 2.159, 2.161, 2.161, 2.157, 2.154, 2.149, 2.144, 2.141,
+ 2.132, 2.133, 2.135, 2.141, 2.149, 2.155, 2.161, 2.161, 2.162, 2.162, 2.163, 2.159, 2.154, 2.149, 2.144, 2.138,
+ 2.136, 2.136, 2.137, 2.143, 2.149, 2.156, 2.162, 2.163, 2.162, 2.163, 2.164, 2.161, 2.157, 2.152, 2.146, 2.138,
+ 2.137, 2.137, 2.141, 2.147, 2.152, 2.157, 2.162, 2.162, 2.159, 2.161, 2.162, 2.162, 2.157, 2.152, 2.148, 2.148
+ ]
+ },
+ {
+ "ct": 4850,
+ "table":
+ [
+ 1.463, 1.464, 1.471, 1.478, 1.479, 1.483, 1.484, 1.486, 1.486, 1.484, 1.483, 1.481, 1.478, 1.475, 1.471, 1.468,
+ 1.463, 1.463, 1.468, 1.476, 1.479, 1.482, 1.484, 1.487, 1.486, 1.484, 1.483, 1.482, 1.478, 1.473, 1.469, 1.468,
+ 1.463, 1.464, 1.468, 1.476, 1.479, 1.483, 1.484, 1.486, 1.486, 1.485, 1.484, 1.482, 1.477, 1.473, 1.469, 1.468,
+ 1.463, 1.464, 1.469, 1.477, 1.481, 1.483, 1.485, 1.487, 1.487, 1.485, 1.485, 1.482, 1.478, 1.474, 1.469, 1.468,
+ 1.465, 1.465, 1.471, 1.478, 1.481, 1.484, 1.486, 1.488, 1.488, 1.487, 1.485, 1.482, 1.477, 1.472, 1.468, 1.467,
+ 1.465, 1.466, 1.472, 1.479, 1.482, 1.485, 1.486, 1.488, 1.488, 1.486, 1.484, 1.479, 1.475, 1.472, 1.468, 1.466,
+ 1.466, 1.466, 1.472, 1.478, 1.482, 1.484, 1.485, 1.488, 1.487, 1.485, 1.483, 1.479, 1.475, 1.472, 1.469, 1.468,
+ 1.465, 1.466, 1.469, 1.476, 1.481, 1.485, 1.485, 1.486, 1.486, 1.485, 1.483, 1.479, 1.477, 1.474, 1.471, 1.469,
+ 1.464, 1.465, 1.469, 1.476, 1.481, 1.484, 1.485, 1.487, 1.487, 1.486, 1.485, 1.481, 1.478, 1.475, 1.471, 1.469,
+ 1.463, 1.464, 1.469, 1.477, 1.481, 1.485, 1.485, 1.488, 1.488, 1.487, 1.486, 1.481, 1.478, 1.475, 1.471, 1.468,
+ 1.464, 1.465, 1.471, 1.478, 1.482, 1.486, 1.486, 1.488, 1.488, 1.487, 1.486, 1.481, 1.478, 1.475, 1.472, 1.468,
+ 1.465, 1.466, 1.472, 1.481, 1.483, 1.487, 1.487, 1.488, 1.488, 1.486, 1.485, 1.481, 1.479, 1.476, 1.473, 1.472
+ ]
+ },
+ {
+ "ct": 5930,
+ "table":
+ [
+ 1.443, 1.444, 1.448, 1.453, 1.459, 1.463, 1.465, 1.467, 1.469, 1.469, 1.467, 1.466, 1.462, 1.457, 1.454, 1.451,
+ 1.443, 1.444, 1.445, 1.451, 1.459, 1.463, 1.465, 1.467, 1.469, 1.469, 1.467, 1.465, 1.461, 1.456, 1.452, 1.451,
+ 1.444, 1.444, 1.445, 1.451, 1.459, 1.463, 1.466, 1.468, 1.469, 1.469, 1.467, 1.465, 1.461, 1.456, 1.452, 1.449,
+ 1.444, 1.444, 1.447, 1.452, 1.459, 1.464, 1.467, 1.469, 1.471, 1.469, 1.467, 1.466, 1.461, 1.456, 1.452, 1.449,
+ 1.444, 1.445, 1.448, 1.452, 1.459, 1.465, 1.469, 1.471, 1.471, 1.471, 1.468, 1.465, 1.461, 1.455, 1.451, 1.449,
+ 1.445, 1.446, 1.449, 1.453, 1.461, 1.466, 1.469, 1.471, 1.472, 1.469, 1.467, 1.465, 1.459, 1.455, 1.451, 1.447,
+ 1.446, 1.446, 1.449, 1.453, 1.461, 1.466, 1.469, 1.469, 1.469, 1.469, 1.467, 1.465, 1.459, 1.455, 1.452, 1.449,
+ 1.446, 1.446, 1.447, 1.451, 1.459, 1.466, 1.469, 1.469, 1.469, 1.469, 1.467, 1.465, 1.461, 1.457, 1.454, 1.451,
+ 1.444, 1.444, 1.447, 1.451, 1.459, 1.466, 1.469, 1.469, 1.471, 1.471, 1.468, 1.466, 1.462, 1.458, 1.454, 1.452,
+ 1.444, 1.444, 1.448, 1.453, 1.459, 1.466, 1.469, 1.471, 1.472, 1.472, 1.468, 1.466, 1.462, 1.458, 1.454, 1.449,
+ 1.446, 1.447, 1.449, 1.454, 1.461, 1.466, 1.471, 1.471, 1.471, 1.471, 1.468, 1.466, 1.462, 1.459, 1.455, 1.449,
+ 1.447, 1.447, 1.452, 1.457, 1.462, 1.468, 1.472, 1.472, 1.471, 1.471, 1.468, 1.466, 1.462, 1.459, 1.456, 1.455
+ ]
+ }
+ ],
+ "luminance_lut":
+ [
+ 1.548, 1.499, 1.387, 1.289, 1.223, 1.183, 1.164, 1.154, 1.153, 1.169, 1.211, 1.265, 1.345, 1.448, 1.581, 1.619,
+ 1.513, 1.412, 1.307, 1.228, 1.169, 1.129, 1.105, 1.098, 1.103, 1.127, 1.157, 1.209, 1.272, 1.361, 1.481, 1.583,
+ 1.449, 1.365, 1.257, 1.175, 1.124, 1.085, 1.062, 1.054, 1.059, 1.079, 1.113, 1.151, 1.211, 1.293, 1.407, 1.488,
+ 1.424, 1.324, 1.222, 1.139, 1.089, 1.056, 1.034, 1.031, 1.034, 1.049, 1.075, 1.115, 1.164, 1.241, 1.351, 1.446,
+ 1.412, 1.297, 1.203, 1.119, 1.069, 1.039, 1.021, 1.016, 1.022, 1.032, 1.052, 1.086, 1.135, 1.212, 1.321, 1.439,
+ 1.406, 1.287, 1.195, 1.115, 1.059, 1.028, 1.014, 1.012, 1.015, 1.026, 1.041, 1.074, 1.125, 1.201, 1.302, 1.425,
+ 1.406, 1.294, 1.205, 1.126, 1.062, 1.031, 1.013, 1.009, 1.011, 1.019, 1.042, 1.079, 1.129, 1.203, 1.302, 1.435,
+ 1.415, 1.318, 1.229, 1.146, 1.076, 1.039, 1.019, 1.014, 1.017, 1.031, 1.053, 1.093, 1.144, 1.219, 1.314, 1.436,
+ 1.435, 1.348, 1.246, 1.164, 1.094, 1.059, 1.036, 1.032, 1.037, 1.049, 1.072, 1.114, 1.167, 1.257, 1.343, 1.462,
+ 1.471, 1.385, 1.278, 1.189, 1.124, 1.084, 1.064, 1.061, 1.069, 1.078, 1.101, 1.146, 1.207, 1.298, 1.415, 1.496,
+ 1.522, 1.436, 1.323, 1.228, 1.169, 1.118, 1.101, 1.094, 1.099, 1.113, 1.146, 1.194, 1.265, 1.353, 1.474, 1.571,
+ 1.578, 1.506, 1.378, 1.281, 1.211, 1.156, 1.135, 1.134, 1.139, 1.158, 1.194, 1.251, 1.327, 1.427, 1.559, 1.611
+ ],
+ "sigma": 0.00121,
+ "sigma_Cb": 0.00115
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 2850,
+ "ccm":
+ [
+ 1.97469, -0.71439, -0.26031,
+ -0.43521, 2.09769, -0.66248,
+ -0.04826, -0.84642, 1.89468
+ ]
+ },
+ {
+ "ct": 2960,
+ "ccm":
+ [
+ 2.12952, -0.91185, -0.21768,
+ -0.38018, 1.90789, -0.52771,
+ 0.03988, -1.10079, 2.06092
+ ]
+ },
+ {
+ "ct": 3580,
+ "ccm":
+ [
+ 2.03422, -0.80048, -0.23374,
+ -0.39089, 1.97221, -0.58132,
+ -0.08969, -0.61439, 1.70408
+ ]
+ },
+ {
+ "ct": 4559,
+ "ccm":
+ [
+ 2.15423, -0.98143, -0.17279,
+ -0.38131, 2.14763, -0.76632,
+ -0.10069, -0.54383, 1.64452
+ ]
+ },
+ {
+ "ct": 5881,
+ "ccm":
+ [
+ 2.18464, -0.95493, -0.22971,
+ -0.36826, 2.00298, -0.63471,
+ -0.15219, -0.38055, 1.53274
+ ]
+ },
+ {
+ "ct": 7600,
+ "ccm":
+ [
+ 2.30687, -0.97295, -0.33392,
+ -0.30872, 2.32779, -1.01908,
+ -0.17761, -0.55891, 1.73651
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.sharpen": { }
+ },
+ {
+ "rpi.hdr":
+ {
+ "MultiExposureUnmerged":
+ {
+ "cadence": [ 1, 2 ],
+ "channel_map":
+ {
+ "short": 1,
+ "long": 2
+ }
+ }
+ }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/src/ipa/rpi/vc4/data/imx477_noir.json b/src/ipa/rpi/vc4/data/imx477_noir.json
new file mode 100644
index 00000000..472f33fe
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/imx477_noir.json
@@ -0,0 +1,656 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 4096
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 27242,
+ "reference_gain": 1.0,
+ "reference_aperture": 1.0,
+ "reference_lux": 830,
+ "reference_Y": 17755
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 0,
+ "reference_slope": 2.767
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 204,
+ "slope": 0.01078
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "bayes": 0
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "channels": [
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.3,
+ 1000, 0.3
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.3,
+ 1000, 0.3
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ },
+ {
+ "base_ev": 0.125,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.3,
+ 1000, 0.3
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.3,
+ 1000, 0.3
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ },
+ {
+ "base_ev": 1.5,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.3,
+ 1000, 0.3
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.3,
+ 1000, 0.3
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.5,
+ "calibrations_Cr": [
+ {
+ "ct": 2960,
+ "table":
+ [
+ 2.088, 2.086, 2.082, 2.081, 2.077, 2.071, 2.068, 2.068, 2.072, 2.073, 2.075, 2.078, 2.084, 2.092, 2.095, 2.098,
+ 2.086, 2.084, 2.079, 2.078, 2.075, 2.068, 2.064, 2.063, 2.068, 2.071, 2.072, 2.075, 2.081, 2.089, 2.092, 2.094,
+ 2.083, 2.081, 2.077, 2.072, 2.069, 2.062, 2.059, 2.059, 2.063, 2.067, 2.069, 2.072, 2.079, 2.088, 2.089, 2.089,
+ 2.081, 2.077, 2.072, 2.068, 2.065, 2.058, 2.055, 2.054, 2.057, 2.062, 2.066, 2.069, 2.077, 2.084, 2.086, 2.086,
+ 2.078, 2.075, 2.069, 2.065, 2.061, 2.055, 2.052, 2.049, 2.051, 2.056, 2.062, 2.065, 2.072, 2.079, 2.081, 2.079,
+ 2.079, 2.075, 2.069, 2.064, 2.061, 2.053, 2.049, 2.046, 2.049, 2.051, 2.057, 2.062, 2.069, 2.075, 2.077, 2.075,
+ 2.082, 2.079, 2.072, 2.065, 2.061, 2.054, 2.049, 2.047, 2.049, 2.051, 2.056, 2.061, 2.066, 2.073, 2.073, 2.069,
+ 2.086, 2.082, 2.075, 2.068, 2.062, 2.054, 2.051, 2.049, 2.051, 2.052, 2.056, 2.061, 2.066, 2.073, 2.073, 2.072,
+ 2.088, 2.086, 2.079, 2.074, 2.066, 2.057, 2.051, 2.051, 2.054, 2.055, 2.056, 2.061, 2.067, 2.072, 2.073, 2.072,
+ 2.091, 2.087, 2.079, 2.075, 2.068, 2.057, 2.052, 2.052, 2.056, 2.055, 2.055, 2.059, 2.066, 2.072, 2.072, 2.072,
+ 2.093, 2.088, 2.081, 2.077, 2.069, 2.059, 2.054, 2.054, 2.057, 2.056, 2.056, 2.058, 2.066, 2.072, 2.073, 2.073,
+ 2.095, 2.091, 2.084, 2.078, 2.075, 2.067, 2.057, 2.057, 2.059, 2.059, 2.058, 2.059, 2.068, 2.073, 2.075, 2.078
+ ]
+ },
+ {
+ "ct": 4850,
+ "table":
+ [
+ 2.973, 2.968, 2.956, 2.943, 2.941, 2.932, 2.923, 2.921, 2.924, 2.929, 2.931, 2.939, 2.953, 2.965, 2.966, 2.976,
+ 2.969, 2.962, 2.951, 2.941, 2.934, 2.928, 2.919, 2.918, 2.919, 2.923, 2.927, 2.933, 2.945, 2.957, 2.962, 2.962,
+ 2.964, 2.956, 2.944, 2.932, 2.929, 2.924, 2.915, 2.914, 2.915, 2.919, 2.924, 2.928, 2.941, 2.952, 2.958, 2.959,
+ 2.957, 2.951, 2.939, 2.928, 2.924, 2.919, 2.913, 2.911, 2.911, 2.915, 2.919, 2.925, 2.936, 2.947, 2.952, 2.953,
+ 2.954, 2.947, 2.935, 2.924, 2.919, 2.915, 2.908, 2.906, 2.906, 2.907, 2.914, 2.921, 2.932, 2.941, 2.943, 2.942,
+ 2.953, 2.946, 2.932, 2.921, 2.916, 2.911, 2.904, 2.902, 2.901, 2.904, 2.909, 2.919, 2.926, 2.937, 2.939, 2.939,
+ 2.953, 2.947, 2.932, 2.918, 2.915, 2.909, 2.903, 2.901, 2.901, 2.906, 2.911, 2.918, 2.924, 2.936, 2.936, 2.932,
+ 2.956, 2.948, 2.934, 2.919, 2.916, 2.908, 2.903, 2.901, 2.902, 2.907, 2.909, 2.917, 2.926, 2.936, 2.939, 2.939,
+ 2.957, 2.951, 2.936, 2.923, 2.917, 2.907, 2.904, 2.901, 2.902, 2.908, 2.911, 2.919, 2.929, 2.939, 2.942, 2.942,
+ 2.961, 2.951, 2.936, 2.922, 2.918, 2.906, 2.904, 2.901, 2.901, 2.907, 2.911, 2.921, 2.931, 2.941, 2.942, 2.944,
+ 2.964, 2.954, 2.936, 2.924, 2.918, 2.909, 2.905, 2.905, 2.905, 2.907, 2.912, 2.923, 2.933, 2.942, 2.944, 2.944,
+ 2.964, 2.958, 2.943, 2.927, 2.921, 2.914, 2.909, 2.907, 2.907, 2.912, 2.916, 2.928, 2.936, 2.944, 2.947, 2.952
+ ]
+ },
+ {
+ "ct": 5930,
+ "table":
+ [
+ 3.312, 3.308, 3.301, 3.294, 3.288, 3.277, 3.268, 3.261, 3.259, 3.261, 3.267, 3.273, 3.285, 3.301, 3.303, 3.312,
+ 3.308, 3.304, 3.294, 3.291, 3.283, 3.271, 3.263, 3.259, 3.257, 3.258, 3.261, 3.268, 3.278, 3.293, 3.299, 3.299,
+ 3.302, 3.296, 3.288, 3.282, 3.276, 3.267, 3.259, 3.254, 3.252, 3.253, 3.256, 3.261, 3.273, 3.289, 3.292, 3.292,
+ 3.296, 3.289, 3.282, 3.276, 3.269, 3.263, 3.256, 3.251, 3.248, 3.249, 3.251, 3.257, 3.268, 3.279, 3.284, 3.284,
+ 3.292, 3.285, 3.279, 3.271, 3.264, 3.257, 3.249, 3.243, 3.241, 3.241, 3.246, 3.252, 3.261, 3.274, 3.275, 3.273,
+ 3.291, 3.285, 3.276, 3.268, 3.259, 3.251, 3.242, 3.239, 3.236, 3.238, 3.244, 3.248, 3.258, 3.268, 3.269, 3.265,
+ 3.294, 3.288, 3.275, 3.266, 3.257, 3.248, 3.239, 3.238, 3.237, 3.238, 3.243, 3.246, 3.255, 3.264, 3.264, 3.257,
+ 3.297, 3.293, 3.279, 3.268, 3.258, 3.249, 3.238, 3.237, 3.239, 3.239, 3.243, 3.245, 3.255, 3.264, 3.264, 3.263,
+ 3.301, 3.295, 3.281, 3.271, 3.259, 3.248, 3.237, 3.237, 3.239, 3.241, 3.243, 3.246, 3.257, 3.265, 3.266, 3.264,
+ 3.306, 3.295, 3.279, 3.271, 3.261, 3.247, 3.235, 3.234, 3.239, 3.239, 3.243, 3.247, 3.258, 3.265, 3.265, 3.264,
+ 3.308, 3.297, 3.279, 3.272, 3.261, 3.249, 3.239, 3.239, 3.241, 3.243, 3.245, 3.248, 3.261, 3.265, 3.266, 3.265,
+ 3.309, 3.301, 3.286, 3.276, 3.267, 3.256, 3.246, 3.242, 3.244, 3.244, 3.249, 3.253, 3.263, 3.267, 3.271, 3.274
+ ]
+ }
+ ],
+ "calibrations_Cb": [
+ {
+ "ct": 2960,
+ "table":
+ [
+ 2.133, 2.134, 2.139, 2.143, 2.148, 2.155, 2.158, 2.158, 2.158, 2.161, 2.161, 2.162, 2.159, 2.156, 2.152, 2.151,
+ 2.132, 2.133, 2.135, 2.142, 2.147, 2.153, 2.158, 2.158, 2.158, 2.158, 2.159, 2.159, 2.157, 2.154, 2.151, 2.148,
+ 2.133, 2.133, 2.135, 2.142, 2.149, 2.154, 2.158, 2.158, 2.157, 2.156, 2.158, 2.157, 2.155, 2.153, 2.148, 2.146,
+ 2.133, 2.133, 2.138, 2.145, 2.149, 2.154, 2.158, 2.159, 2.158, 2.155, 2.157, 2.156, 2.153, 2.149, 2.146, 2.144,
+ 2.133, 2.134, 2.139, 2.146, 2.149, 2.154, 2.158, 2.159, 2.159, 2.156, 2.154, 2.154, 2.149, 2.145, 2.143, 2.139,
+ 2.135, 2.135, 2.139, 2.146, 2.151, 2.155, 2.158, 2.159, 2.158, 2.156, 2.153, 2.151, 2.146, 2.143, 2.139, 2.136,
+ 2.135, 2.135, 2.138, 2.145, 2.151, 2.154, 2.157, 2.158, 2.157, 2.156, 2.153, 2.151, 2.147, 2.143, 2.141, 2.137,
+ 2.135, 2.134, 2.135, 2.141, 2.149, 2.154, 2.157, 2.157, 2.157, 2.157, 2.157, 2.153, 2.149, 2.146, 2.142, 2.139,
+ 2.132, 2.133, 2.135, 2.139, 2.148, 2.153, 2.158, 2.159, 2.159, 2.161, 2.161, 2.157, 2.154, 2.149, 2.144, 2.141,
+ 2.132, 2.133, 2.135, 2.141, 2.149, 2.155, 2.161, 2.161, 2.162, 2.162, 2.163, 2.159, 2.154, 2.149, 2.144, 2.138,
+ 2.136, 2.136, 2.137, 2.143, 2.149, 2.156, 2.162, 2.163, 2.162, 2.163, 2.164, 2.161, 2.157, 2.152, 2.146, 2.138,
+ 2.137, 2.137, 2.141, 2.147, 2.152, 2.157, 2.162, 2.162, 2.159, 2.161, 2.162, 2.162, 2.157, 2.152, 2.148, 2.148
+ ]
+ },
+ {
+ "ct": 4850,
+ "table":
+ [
+ 1.463, 1.464, 1.471, 1.478, 1.479, 1.483, 1.484, 1.486, 1.486, 1.484, 1.483, 1.481, 1.478, 1.475, 1.471, 1.468,
+ 1.463, 1.463, 1.468, 1.476, 1.479, 1.482, 1.484, 1.487, 1.486, 1.484, 1.483, 1.482, 1.478, 1.473, 1.469, 1.468,
+ 1.463, 1.464, 1.468, 1.476, 1.479, 1.483, 1.484, 1.486, 1.486, 1.485, 1.484, 1.482, 1.477, 1.473, 1.469, 1.468,
+ 1.463, 1.464, 1.469, 1.477, 1.481, 1.483, 1.485, 1.487, 1.487, 1.485, 1.485, 1.482, 1.478, 1.474, 1.469, 1.468,
+ 1.465, 1.465, 1.471, 1.478, 1.481, 1.484, 1.486, 1.488, 1.488, 1.487, 1.485, 1.482, 1.477, 1.472, 1.468, 1.467,
+ 1.465, 1.466, 1.472, 1.479, 1.482, 1.485, 1.486, 1.488, 1.488, 1.486, 1.484, 1.479, 1.475, 1.472, 1.468, 1.466,
+ 1.466, 1.466, 1.472, 1.478, 1.482, 1.484, 1.485, 1.488, 1.487, 1.485, 1.483, 1.479, 1.475, 1.472, 1.469, 1.468,
+ 1.465, 1.466, 1.469, 1.476, 1.481, 1.485, 1.485, 1.486, 1.486, 1.485, 1.483, 1.479, 1.477, 1.474, 1.471, 1.469,
+ 1.464, 1.465, 1.469, 1.476, 1.481, 1.484, 1.485, 1.487, 1.487, 1.486, 1.485, 1.481, 1.478, 1.475, 1.471, 1.469,
+ 1.463, 1.464, 1.469, 1.477, 1.481, 1.485, 1.485, 1.488, 1.488, 1.487, 1.486, 1.481, 1.478, 1.475, 1.471, 1.468,
+ 1.464, 1.465, 1.471, 1.478, 1.482, 1.486, 1.486, 1.488, 1.488, 1.487, 1.486, 1.481, 1.478, 1.475, 1.472, 1.468,
+ 1.465, 1.466, 1.472, 1.481, 1.483, 1.487, 1.487, 1.488, 1.488, 1.486, 1.485, 1.481, 1.479, 1.476, 1.473, 1.472
+ ]
+ },
+ {
+ "ct": 5930,
+ "table":
+ [
+ 1.443, 1.444, 1.448, 1.453, 1.459, 1.463, 1.465, 1.467, 1.469, 1.469, 1.467, 1.466, 1.462, 1.457, 1.454, 1.451,
+ 1.443, 1.444, 1.445, 1.451, 1.459, 1.463, 1.465, 1.467, 1.469, 1.469, 1.467, 1.465, 1.461, 1.456, 1.452, 1.451,
+ 1.444, 1.444, 1.445, 1.451, 1.459, 1.463, 1.466, 1.468, 1.469, 1.469, 1.467, 1.465, 1.461, 1.456, 1.452, 1.449,
+ 1.444, 1.444, 1.447, 1.452, 1.459, 1.464, 1.467, 1.469, 1.471, 1.469, 1.467, 1.466, 1.461, 1.456, 1.452, 1.449,
+ 1.444, 1.445, 1.448, 1.452, 1.459, 1.465, 1.469, 1.471, 1.471, 1.471, 1.468, 1.465, 1.461, 1.455, 1.451, 1.449,
+ 1.445, 1.446, 1.449, 1.453, 1.461, 1.466, 1.469, 1.471, 1.472, 1.469, 1.467, 1.465, 1.459, 1.455, 1.451, 1.447,
+ 1.446, 1.446, 1.449, 1.453, 1.461, 1.466, 1.469, 1.469, 1.469, 1.469, 1.467, 1.465, 1.459, 1.455, 1.452, 1.449,
+ 1.446, 1.446, 1.447, 1.451, 1.459, 1.466, 1.469, 1.469, 1.469, 1.469, 1.467, 1.465, 1.461, 1.457, 1.454, 1.451,
+ 1.444, 1.444, 1.447, 1.451, 1.459, 1.466, 1.469, 1.469, 1.471, 1.471, 1.468, 1.466, 1.462, 1.458, 1.454, 1.452,
+ 1.444, 1.444, 1.448, 1.453, 1.459, 1.466, 1.469, 1.471, 1.472, 1.472, 1.468, 1.466, 1.462, 1.458, 1.454, 1.449,
+ 1.446, 1.447, 1.449, 1.454, 1.461, 1.466, 1.471, 1.471, 1.471, 1.471, 1.468, 1.466, 1.462, 1.459, 1.455, 1.449,
+ 1.447, 1.447, 1.452, 1.457, 1.462, 1.468, 1.472, 1.472, 1.471, 1.471, 1.468, 1.466, 1.462, 1.459, 1.456, 1.455
+ ]
+ }
+ ],
+ "luminance_lut":
+ [
+ 1.548, 1.499, 1.387, 1.289, 1.223, 1.183, 1.164, 1.154, 1.153, 1.169, 1.211, 1.265, 1.345, 1.448, 1.581, 1.619,
+ 1.513, 1.412, 1.307, 1.228, 1.169, 1.129, 1.105, 1.098, 1.103, 1.127, 1.157, 1.209, 1.272, 1.361, 1.481, 1.583,
+ 1.449, 1.365, 1.257, 1.175, 1.124, 1.085, 1.062, 1.054, 1.059, 1.079, 1.113, 1.151, 1.211, 1.293, 1.407, 1.488,
+ 1.424, 1.324, 1.222, 1.139, 1.089, 1.056, 1.034, 1.031, 1.034, 1.049, 1.075, 1.115, 1.164, 1.241, 1.351, 1.446,
+ 1.412, 1.297, 1.203, 1.119, 1.069, 1.039, 1.021, 1.016, 1.022, 1.032, 1.052, 1.086, 1.135, 1.212, 1.321, 1.439,
+ 1.406, 1.287, 1.195, 1.115, 1.059, 1.028, 1.014, 1.012, 1.015, 1.026, 1.041, 1.074, 1.125, 1.201, 1.302, 1.425,
+ 1.406, 1.294, 1.205, 1.126, 1.062, 1.031, 1.013, 1.009, 1.011, 1.019, 1.042, 1.079, 1.129, 1.203, 1.302, 1.435,
+ 1.415, 1.318, 1.229, 1.146, 1.076, 1.039, 1.019, 1.014, 1.017, 1.031, 1.053, 1.093, 1.144, 1.219, 1.314, 1.436,
+ 1.435, 1.348, 1.246, 1.164, 1.094, 1.059, 1.036, 1.032, 1.037, 1.049, 1.072, 1.114, 1.167, 1.257, 1.343, 1.462,
+ 1.471, 1.385, 1.278, 1.189, 1.124, 1.084, 1.064, 1.061, 1.069, 1.078, 1.101, 1.146, 1.207, 1.298, 1.415, 1.496,
+ 1.522, 1.436, 1.323, 1.228, 1.169, 1.118, 1.101, 1.094, 1.099, 1.113, 1.146, 1.194, 1.265, 1.353, 1.474, 1.571,
+ 1.578, 1.506, 1.378, 1.281, 1.211, 1.156, 1.135, 1.134, 1.139, 1.158, 1.194, 1.251, 1.327, 1.427, 1.559, 1.611
+ ],
+ "sigma": 0.00121,
+ "sigma_Cb": 0.00115
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 2360,
+ "ccm":
+ [
+ 1.66078, -0.23588, -0.42491,
+ -0.47456, 1.82763, -0.35307,
+ -0.00545, -1.44729, 2.45273
+ ]
+ },
+ {
+ "ct": 2870,
+ "ccm":
+ [
+ 1.78373, -0.55344, -0.23029,
+ -0.39951, 1.69701, -0.29751,
+ 0.01986, -1.06525, 2.04539
+ ]
+ },
+ {
+ "ct": 2970,
+ "ccm":
+ [
+ 1.73511, -0.56973, -0.16537,
+ -0.36338, 1.69878, -0.33539,
+ -0.02354, -0.76813, 1.79168
+ ]
+ },
+ {
+ "ct": 3000,
+ "ccm":
+ [
+ 2.06374, -0.92218, -0.14156,
+ -0.41721, 1.69289, -0.27568,
+ -0.00554, -0.92741, 1.93295
+ ]
+ },
+ {
+ "ct": 3700,
+ "ccm":
+ [
+ 2.13792, -1.08136, -0.05655,
+ -0.34739, 1.58989, -0.24249,
+ -0.00349, -0.76789, 1.77138
+ ]
+ },
+ {
+ "ct": 3870,
+ "ccm":
+ [
+ 1.83834, -0.70528, -0.13307,
+ -0.30499, 1.60523, -0.30024,
+ -0.05701, -0.58313, 1.64014
+ ]
+ },
+ {
+ "ct": 4000,
+ "ccm":
+ [
+ 2.15741, -1.10295, -0.05447,
+ -0.34631, 1.61158, -0.26528,
+ -0.02723, -0.70288, 1.73011
+ ]
+ },
+ {
+ "ct": 4400,
+ "ccm":
+ [
+ 2.05729, -0.95007, -0.10723,
+ -0.41712, 1.78606, -0.36894,
+ -0.11899, -0.55727, 1.67626
+ ]
+ },
+ {
+ "ct": 4715,
+ "ccm":
+ [
+ 1.90255, -0.77478, -0.12777,
+ -0.31338, 1.88197, -0.56858,
+ -0.06001, -0.61785, 1.67786
+ ]
+ },
+ {
+ "ct": 5920,
+ "ccm":
+ [
+ 1.98691, -0.84671, -0.14019,
+ -0.26581, 1.70615, -0.44035,
+ -0.09532, -0.47332, 1.56864
+ ]
+ },
+ {
+ "ct": 9050,
+ "ccm":
+ [
+ 2.09255, -0.76541, -0.32714,
+ -0.28973, 2.27462, -0.98489,
+ -0.17299, -0.61275, 1.78574
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.sharpen": { }
+ },
+ {
+ "rpi.hdr":
+ {
+ "MultiExposureUnmerged":
+ {
+ "cadence": [ 1, 2 ],
+ "channel_map":
+ {
+ "short": 1,
+ "long": 2
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/src/ipa/rpi/vc4/data/imx477_scientific.json b/src/ipa/rpi/vc4/data/imx477_scientific.json
new file mode 100644
index 00000000..9dc32eb1
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/imx477_scientific.json
@@ -0,0 +1,488 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 4096
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 27242,
+ "reference_gain": 1.0,
+ "reference_aperture": 1.0,
+ "reference_lux": 830,
+ "reference_Y": 17755
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 0,
+ "reference_slope": 2.767
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 204,
+ "slope": 0.01078
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "priors": [
+ {
+ "lux": 0,
+ "prior":
+ [
+ 2000, 1.0,
+ 3000, 0.0,
+ 13000, 0.0
+ ]
+ },
+ {
+ "lux": 800,
+ "prior":
+ [
+ 2000, 0.0,
+ 6000, 2.0,
+ 13000, 2.0
+ ]
+ },
+ {
+ "lux": 1500,
+ "prior":
+ [
+ 2000, 0.0,
+ 4000, 1.0,
+ 6000, 6.0,
+ 6500, 7.0,
+ 7000, 1.0,
+ 13000, 1.0
+ ]
+ }
+ ],
+ "modes":
+ {
+ "auto":
+ {
+ "lo": 2500,
+ "hi": 8000
+ },
+ "incandescent":
+ {
+ "lo": 2500,
+ "hi": 3000
+ },
+ "tungsten":
+ {
+ "lo": 3000,
+ "hi": 3500
+ },
+ "fluorescent":
+ {
+ "lo": 4000,
+ "hi": 4700
+ },
+ "indoor":
+ {
+ "lo": 3000,
+ "hi": 5000
+ },
+ "daylight":
+ {
+ "lo": 5500,
+ "hi": 6500
+ },
+ "cloudy":
+ {
+ "lo": 7000,
+ "hi": 8600
+ }
+ },
+ "bayes": 1,
+ "ct_curve":
+ [
+ 2000.0, 0.6331025775790707, 0.27424225990946915,
+ 2200.0, 0.5696117366212947, 0.3116091368689487,
+ 2400.0, 0.5204264653110015, 0.34892179554105873,
+ 2600.0, 0.48148675531667223, 0.38565229719076793,
+ 2800.0, 0.450085403501908, 0.42145684622485047,
+ 3000.0, 0.42436130159169017, 0.45611835670028816,
+ 3200.0, 0.40300023695527337, 0.48950766215198593,
+ 3400.0, 0.3850520052612984, 0.5215567075837261,
+ 3600.0, 0.36981508088230314, 0.5522397906415475,
+ 4100.0, 0.333468007836758, 0.5909770465167908,
+ 4600.0, 0.31196097364221376, 0.6515706327327178,
+ 5100.0, 0.2961860409294588, 0.7068178946570284,
+ 5600.0, 0.2842607232745885, 0.7564837749584288,
+ 6100.0, 0.2750265787051251, 0.8006183524920533,
+ 6600.0, 0.2677057225584924, 0.8398879225373039,
+ 7100.0, 0.2617955199757274, 0.8746456080032436,
+ 7600.0, 0.25693714288250125, 0.905569559506562,
+ 8100.0, 0.25287531441063316, 0.9331696750390895,
+ 8600.0, 0.24946601483331993, 0.9576820904825795
+ ],
+ "sensitivity_r": 1.05,
+ "sensitivity_b": 1.05,
+ "transverse_pos": 0.0238,
+ "transverse_neg": 0.04429,
+ "coarse_step": 0.1
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.3,
+ 1000, 0.3
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.3,
+ 1000, 0.3
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 0,
+ "gamma_curve":
+ [
+ 0, 0,
+ 512, 2304,
+ 1024, 4608,
+ 1536, 6573,
+ 2048, 8401,
+ 2560, 9992,
+ 3072, 11418,
+ 3584, 12719,
+ 4096, 13922,
+ 4608, 15045,
+ 5120, 16103,
+ 5632, 17104,
+ 6144, 18056,
+ 6656, 18967,
+ 7168, 19839,
+ 7680, 20679,
+ 8192, 21488,
+ 9216, 23028,
+ 10240, 24477,
+ 11264, 25849,
+ 12288, 27154,
+ 13312, 28401,
+ 14336, 29597,
+ 15360, 30747,
+ 16384, 31856,
+ 17408, 32928,
+ 18432, 33966,
+ 19456, 34973,
+ 20480, 35952,
+ 22528, 37832,
+ 24576, 39621,
+ 26624, 41330,
+ 28672, 42969,
+ 30720, 44545,
+ 32768, 46065,
+ 34816, 47534,
+ 36864, 48956,
+ 38912, 50336,
+ 40960, 51677,
+ 43008, 52982,
+ 45056, 54253,
+ 47104, 55493,
+ 49152, 56704,
+ 51200, 57888,
+ 53248, 59046,
+ 55296, 60181,
+ 57344, 61292,
+ 59392, 62382,
+ 61440, 63452,
+ 63488, 64503,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 2000,
+ "ccm":
+ [
+ 1.5813882365848004, -0.35293683714581114, -0.27378771561617715,
+ -0.4347297185453639, 1.5792631087746074, -0.12102601986382337,
+ 0.2322290578987574, -1.4382672640468128, 2.1386425781770755
+ ]
+ },
+ {
+ "ct": 2200,
+ "ccm":
+ [
+ 1.6322048484088305, -0.45932286857238486, -0.21373542690252198,
+ -0.3970719209901105, 1.5877868651467202, -0.17249380832122455,
+ 0.20753774825903412, -1.2660673594740142, 2.005654261091916
+ ]
+ },
+ {
+ "ct": 2400,
+ "ccm":
+ [
+ 1.6766610071470398, -0.5447101051688111, -0.16838641107407676,
+ -0.3659845183388154, 1.592223692670396, -0.2127091997471162,
+ 0.1833964516767549, -1.1339155942419321, 1.9089342978542396
+ ]
+ },
+ {
+ "ct": 2600,
+ "ccm":
+ [
+ 1.7161984340622154, -0.6152585785678794, -0.1331100845092582,
+ -0.33972082628066275, 1.5944888273736966, -0.2453979465898787,
+ 0.1615577497676328, -1.0298684958833109, 1.8357854177422053
+ ]
+ },
+ {
+ "ct": 2800,
+ "ccm":
+ [
+ 1.7519307259815728, -0.6748682080165339, -0.10515169074540848,
+ -0.3171703484479931, 1.5955820297498486, -0.2727395854813966,
+ 0.14230870739974305, -0.9460976023551511, 1.778709391659538
+ ]
+ },
+ {
+ "ct": 3000,
+ "ccm":
+ [
+ 1.7846716625128374, -0.7261240476375332, -0.08274697420358428,
+ -0.2975654035173307, 1.5960425637021738, -0.2961043416505157,
+ 0.12546426281675097, -0.8773434727076518, 1.7330356805246685
+ ]
+ },
+ {
+ "ct": 3200,
+ "ccm":
+ [
+ 1.8150085872943436, -0.7708109672515514, -0.06469468211419174,
+ -0.2803468940646277, 1.596168842967451, -0.3164044170681625,
+ 0.11071494533513807, -0.8199772290209191, 1.69572135046367
+ ]
+ },
+ {
+ "ct": 3400,
+ "ccm":
+ [
+ 1.8433668304932087, -0.8102060605062592, -0.05013485852801454,
+ -0.2650934036324084, 1.5961288492969294, -0.33427554893845535,
+ 0.0977478941863518, -0.7714303112098978, 1.6647070820146963
+ ]
+ },
+ {
+ "ct": 3600,
+ "ccm":
+ [
+ 1.8700575831917468, -0.8452518300291346, -0.03842644337477299,
+ -0.2514794528347016, 1.5960178299141876, -0.3501774949366156,
+ 0.08628520830733245, -0.729841503339915, 1.638553343939267
+ ]
+ },
+ {
+ "ct": 4100,
+ "ccm":
+ [
+ 1.8988700903560716, -0.8911278803351247, -0.018848644425650693,
+ -0.21487101487384094, 1.599236541382614, -0.39405450457918206,
+ 0.08251488056482173, -0.7178919368326191, 1.6267009056502704
+ ]
+ },
+ {
+ "ct": 4600,
+ "ccm":
+ [
+ 1.960355191764125, -0.9624344812121991, -0.0017122408632169205,
+ -0.19444620905212898, 1.5978493736948447, -0.416727638296156,
+ 0.06310261513271084, -0.6483790952487849, 1.5834605477213093
+ ]
+ },
+ {
+ "ct": 5100,
+ "ccm":
+ [
+ 2.014680536961399, -1.0195930302148566, 0.007728256612638915,
+ -0.17751999660735496, 1.5977081555831, -0.4366085498741474,
+ 0.04741267583041334, -0.5950327902073489, 1.5512919847321853
+ ]
+ },
+ {
+ "ct": 5600,
+ "ccm":
+ [
+ 2.062652337917251, -1.0658386679125478, 0.011886354256281267,
+ -0.16319197721451495, 1.598363237584736, -0.45422061523742235,
+ 0.03465810928795378, -0.5535454108047286, 1.5269025836946852
+ ]
+ },
+ {
+ "ct": 6100,
+ "ccm":
+ [
+ 2.104985902038069, -1.103597868736314, 0.012503517136539277,
+ -0.15090797064906178, 1.5994703078166095, -0.4698414300864995,
+ 0.02421766063474242, -0.5208922818196823, 1.5081270847783788
+ ]
+ },
+ {
+ "ct": 6600,
+ "ccm":
+ [
+ 2.1424988751299714, -1.134760232367728, 0.010730356010435522,
+ -0.14021846798466234, 1.600822462230719, -0.48379204794526487,
+ 0.015521315410496622, -0.49463630325832275, 1.4933313534840327
+ ]
+ },
+ {
+ "ct": 7100,
+ "ccm":
+ [
+ 2.1758034100130925, -1.1607558481037359, 0.007452724895469076,
+ -0.13085694672641826, 1.6022648614493245, -0.4962330524084075,
+ 0.008226943206113427, -0.4733077192319791, 1.4815336120437468
+ ]
+ },
+ {
+ "ct": 7600,
+ "ccm":
+ [
+ 2.205529206931895, -1.1826662383072108, 0.0032019529917605167,
+ -0.122572009780486, 1.6037258133595753, -0.5073973734282445,
+ 0.0020132587619863425, -0.4556590236414181, 1.471939788496745
+ ]
+ },
+ {
+ "ct": 8100,
+ "ccm":
+ [
+ 2.232224969223067, -1.2013672897252885, -0.0016234598095482985,
+ -0.11518026734442414, 1.6051544769439803, -0.5174558699422255,
+ -0.0033378143542219835, -0.4408590373867774, 1.4640252230667452
+ ]
+ },
+ {
+ "ct": 8600,
+ "ccm":
+ [
+ 2.256082295891265, -1.2173210549996634, -0.0067231350481711675,
+ -0.10860272839843167, 1.6065150139140594, -0.5264728573611493,
+ -0.007952618707984149, -0.4284003574050791, 1.4574646927117558
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.sharpen": { }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/src/ipa/rpi/vc4/data/imx477_v1.json b/src/ipa/rpi/vc4/data/imx477_v1.json
new file mode 100644
index 00000000..55e4adc1
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/imx477_v1.json
@@ -0,0 +1,525 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 4096
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 27242,
+ "reference_gain": 1.0,
+ "reference_aperture": 1.0,
+ "reference_lux": 830,
+ "reference_Y": 17755
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 0,
+ "reference_slope": 2.767
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 204,
+ "slope": 0.01078
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "priors": [
+ {
+ "lux": 0,
+ "prior":
+ [
+ 2000, 1.0,
+ 3000, 0.0,
+ 13000, 0.0
+ ]
+ },
+ {
+ "lux": 800,
+ "prior":
+ [
+ 2000, 0.0,
+ 6000, 2.0,
+ 13000, 2.0
+ ]
+ },
+ {
+ "lux": 1500,
+ "prior":
+ [
+ 2000, 0.0,
+ 4000, 1.0,
+ 6000, 6.0,
+ 6500, 7.0,
+ 7000, 1.0,
+ 13000, 1.0
+ ]
+ }
+ ],
+ "modes":
+ {
+ "auto":
+ {
+ "lo": 2500,
+ "hi": 8000
+ },
+ "incandescent":
+ {
+ "lo": 2500,
+ "hi": 3000
+ },
+ "tungsten":
+ {
+ "lo": 3000,
+ "hi": 3500
+ },
+ "fluorescent":
+ {
+ "lo": 4000,
+ "hi": 4700
+ },
+ "indoor":
+ {
+ "lo": 3000,
+ "hi": 5000
+ },
+ "daylight":
+ {
+ "lo": 5500,
+ "hi": 6500
+ },
+ "cloudy":
+ {
+ "lo": 7000,
+ "hi": 8600
+ }
+ },
+ "bayes": 1,
+ "ct_curve":
+ [
+ 2360.0, 0.6009, 0.3093,
+ 2870.0, 0.5047, 0.3936,
+ 2970.0, 0.4782, 0.4221,
+ 3700.0, 0.4212, 0.4923,
+ 3870.0, 0.4037, 0.5166,
+ 4000.0, 0.3965, 0.5271,
+ 4400.0, 0.3703, 0.5666,
+ 4715.0, 0.3411, 0.6147,
+ 5920.0, 0.3108, 0.6687,
+ 9050.0, 0.2524, 0.7856
+ ],
+ "sensitivity_r": 1.05,
+ "sensitivity_b": 1.05,
+ "transverse_pos": 0.0238,
+ "transverse_neg": 0.04429
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.3,
+ 1000, 0.3
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.3,
+ 1000, 0.3
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.5,
+ "calibrations_Cr": [
+ {
+ "ct": 2960,
+ "table":
+ [
+ 2.088, 2.086, 2.082, 2.081, 2.077, 2.071, 2.068, 2.068, 2.072, 2.073, 2.075, 2.078, 2.084, 2.092, 2.095, 2.098,
+ 2.086, 2.084, 2.079, 2.078, 2.075, 2.068, 2.064, 2.063, 2.068, 2.071, 2.072, 2.075, 2.081, 2.089, 2.092, 2.094,
+ 2.083, 2.081, 2.077, 2.072, 2.069, 2.062, 2.059, 2.059, 2.063, 2.067, 2.069, 2.072, 2.079, 2.088, 2.089, 2.089,
+ 2.081, 2.077, 2.072, 2.068, 2.065, 2.058, 2.055, 2.054, 2.057, 2.062, 2.066, 2.069, 2.077, 2.084, 2.086, 2.086,
+ 2.078, 2.075, 2.069, 2.065, 2.061, 2.055, 2.052, 2.049, 2.051, 2.056, 2.062, 2.065, 2.072, 2.079, 2.081, 2.079,
+ 2.079, 2.075, 2.069, 2.064, 2.061, 2.053, 2.049, 2.046, 2.049, 2.051, 2.057, 2.062, 2.069, 2.075, 2.077, 2.075,
+ 2.082, 2.079, 2.072, 2.065, 2.061, 2.054, 2.049, 2.047, 2.049, 2.051, 2.056, 2.061, 2.066, 2.073, 2.073, 2.069,
+ 2.086, 2.082, 2.075, 2.068, 2.062, 2.054, 2.051, 2.049, 2.051, 2.052, 2.056, 2.061, 2.066, 2.073, 2.073, 2.072,
+ 2.088, 2.086, 2.079, 2.074, 2.066, 2.057, 2.051, 2.051, 2.054, 2.055, 2.056, 2.061, 2.067, 2.072, 2.073, 2.072,
+ 2.091, 2.087, 2.079, 2.075, 2.068, 2.057, 2.052, 2.052, 2.056, 2.055, 2.055, 2.059, 2.066, 2.072, 2.072, 2.072,
+ 2.093, 2.088, 2.081, 2.077, 2.069, 2.059, 2.054, 2.054, 2.057, 2.056, 2.056, 2.058, 2.066, 2.072, 2.073, 2.073,
+ 2.095, 2.091, 2.084, 2.078, 2.075, 2.067, 2.057, 2.057, 2.059, 2.059, 2.058, 2.059, 2.068, 2.073, 2.075, 2.078
+ ]
+ },
+ {
+ "ct": 4850,
+ "table":
+ [
+ 2.973, 2.968, 2.956, 2.943, 2.941, 2.932, 2.923, 2.921, 2.924, 2.929, 2.931, 2.939, 2.953, 2.965, 2.966, 2.976,
+ 2.969, 2.962, 2.951, 2.941, 2.934, 2.928, 2.919, 2.918, 2.919, 2.923, 2.927, 2.933, 2.945, 2.957, 2.962, 2.962,
+ 2.964, 2.956, 2.944, 2.932, 2.929, 2.924, 2.915, 2.914, 2.915, 2.919, 2.924, 2.928, 2.941, 2.952, 2.958, 2.959,
+ 2.957, 2.951, 2.939, 2.928, 2.924, 2.919, 2.913, 2.911, 2.911, 2.915, 2.919, 2.925, 2.936, 2.947, 2.952, 2.953,
+ 2.954, 2.947, 2.935, 2.924, 2.919, 2.915, 2.908, 2.906, 2.906, 2.907, 2.914, 2.921, 2.932, 2.941, 2.943, 2.942,
+ 2.953, 2.946, 2.932, 2.921, 2.916, 2.911, 2.904, 2.902, 2.901, 2.904, 2.909, 2.919, 2.926, 2.937, 2.939, 2.939,
+ 2.953, 2.947, 2.932, 2.918, 2.915, 2.909, 2.903, 2.901, 2.901, 2.906, 2.911, 2.918, 2.924, 2.936, 2.936, 2.932,
+ 2.956, 2.948, 2.934, 2.919, 2.916, 2.908, 2.903, 2.901, 2.902, 2.907, 2.909, 2.917, 2.926, 2.936, 2.939, 2.939,
+ 2.957, 2.951, 2.936, 2.923, 2.917, 2.907, 2.904, 2.901, 2.902, 2.908, 2.911, 2.919, 2.929, 2.939, 2.942, 2.942,
+ 2.961, 2.951, 2.936, 2.922, 2.918, 2.906, 2.904, 2.901, 2.901, 2.907, 2.911, 2.921, 2.931, 2.941, 2.942, 2.944,
+ 2.964, 2.954, 2.936, 2.924, 2.918, 2.909, 2.905, 2.905, 2.905, 2.907, 2.912, 2.923, 2.933, 2.942, 2.944, 2.944,
+ 2.964, 2.958, 2.943, 2.927, 2.921, 2.914, 2.909, 2.907, 2.907, 2.912, 2.916, 2.928, 2.936, 2.944, 2.947, 2.952
+ ]
+ },
+ {
+ "ct": 5930,
+ "table":
+ [
+ 3.312, 3.308, 3.301, 3.294, 3.288, 3.277, 3.268, 3.261, 3.259, 3.261, 3.267, 3.273, 3.285, 3.301, 3.303, 3.312,
+ 3.308, 3.304, 3.294, 3.291, 3.283, 3.271, 3.263, 3.259, 3.257, 3.258, 3.261, 3.268, 3.278, 3.293, 3.299, 3.299,
+ 3.302, 3.296, 3.288, 3.282, 3.276, 3.267, 3.259, 3.254, 3.252, 3.253, 3.256, 3.261, 3.273, 3.289, 3.292, 3.292,
+ 3.296, 3.289, 3.282, 3.276, 3.269, 3.263, 3.256, 3.251, 3.248, 3.249, 3.251, 3.257, 3.268, 3.279, 3.284, 3.284,
+ 3.292, 3.285, 3.279, 3.271, 3.264, 3.257, 3.249, 3.243, 3.241, 3.241, 3.246, 3.252, 3.261, 3.274, 3.275, 3.273,
+ 3.291, 3.285, 3.276, 3.268, 3.259, 3.251, 3.242, 3.239, 3.236, 3.238, 3.244, 3.248, 3.258, 3.268, 3.269, 3.265,
+ 3.294, 3.288, 3.275, 3.266, 3.257, 3.248, 3.239, 3.238, 3.237, 3.238, 3.243, 3.246, 3.255, 3.264, 3.264, 3.257,
+ 3.297, 3.293, 3.279, 3.268, 3.258, 3.249, 3.238, 3.237, 3.239, 3.239, 3.243, 3.245, 3.255, 3.264, 3.264, 3.263,
+ 3.301, 3.295, 3.281, 3.271, 3.259, 3.248, 3.237, 3.237, 3.239, 3.241, 3.243, 3.246, 3.257, 3.265, 3.266, 3.264,
+ 3.306, 3.295, 3.279, 3.271, 3.261, 3.247, 3.235, 3.234, 3.239, 3.239, 3.243, 3.247, 3.258, 3.265, 3.265, 3.264,
+ 3.308, 3.297, 3.279, 3.272, 3.261, 3.249, 3.239, 3.239, 3.241, 3.243, 3.245, 3.248, 3.261, 3.265, 3.266, 3.265,
+ 3.309, 3.301, 3.286, 3.276, 3.267, 3.256, 3.246, 3.242, 3.244, 3.244, 3.249, 3.253, 3.263, 3.267, 3.271, 3.274
+ ]
+ }
+ ],
+ "calibrations_Cb": [
+ {
+ "ct": 2960,
+ "table":
+ [
+ 2.133, 2.134, 2.139, 2.143, 2.148, 2.155, 2.158, 2.158, 2.158, 2.161, 2.161, 2.162, 2.159, 2.156, 2.152, 2.151,
+ 2.132, 2.133, 2.135, 2.142, 2.147, 2.153, 2.158, 2.158, 2.158, 2.158, 2.159, 2.159, 2.157, 2.154, 2.151, 2.148,
+ 2.133, 2.133, 2.135, 2.142, 2.149, 2.154, 2.158, 2.158, 2.157, 2.156, 2.158, 2.157, 2.155, 2.153, 2.148, 2.146,
+ 2.133, 2.133, 2.138, 2.145, 2.149, 2.154, 2.158, 2.159, 2.158, 2.155, 2.157, 2.156, 2.153, 2.149, 2.146, 2.144,
+ 2.133, 2.134, 2.139, 2.146, 2.149, 2.154, 2.158, 2.159, 2.159, 2.156, 2.154, 2.154, 2.149, 2.145, 2.143, 2.139,
+ 2.135, 2.135, 2.139, 2.146, 2.151, 2.155, 2.158, 2.159, 2.158, 2.156, 2.153, 2.151, 2.146, 2.143, 2.139, 2.136,
+ 2.135, 2.135, 2.138, 2.145, 2.151, 2.154, 2.157, 2.158, 2.157, 2.156, 2.153, 2.151, 2.147, 2.143, 2.141, 2.137,
+ 2.135, 2.134, 2.135, 2.141, 2.149, 2.154, 2.157, 2.157, 2.157, 2.157, 2.157, 2.153, 2.149, 2.146, 2.142, 2.139,
+ 2.132, 2.133, 2.135, 2.139, 2.148, 2.153, 2.158, 2.159, 2.159, 2.161, 2.161, 2.157, 2.154, 2.149, 2.144, 2.141,
+ 2.132, 2.133, 2.135, 2.141, 2.149, 2.155, 2.161, 2.161, 2.162, 2.162, 2.163, 2.159, 2.154, 2.149, 2.144, 2.138,
+ 2.136, 2.136, 2.137, 2.143, 2.149, 2.156, 2.162, 2.163, 2.162, 2.163, 2.164, 2.161, 2.157, 2.152, 2.146, 2.138,
+ 2.137, 2.137, 2.141, 2.147, 2.152, 2.157, 2.162, 2.162, 2.159, 2.161, 2.162, 2.162, 2.157, 2.152, 2.148, 2.148
+ ]
+ },
+ {
+ "ct": 4850,
+ "table":
+ [
+ 1.463, 1.464, 1.471, 1.478, 1.479, 1.483, 1.484, 1.486, 1.486, 1.484, 1.483, 1.481, 1.478, 1.475, 1.471, 1.468,
+ 1.463, 1.463, 1.468, 1.476, 1.479, 1.482, 1.484, 1.487, 1.486, 1.484, 1.483, 1.482, 1.478, 1.473, 1.469, 1.468,
+ 1.463, 1.464, 1.468, 1.476, 1.479, 1.483, 1.484, 1.486, 1.486, 1.485, 1.484, 1.482, 1.477, 1.473, 1.469, 1.468,
+ 1.463, 1.464, 1.469, 1.477, 1.481, 1.483, 1.485, 1.487, 1.487, 1.485, 1.485, 1.482, 1.478, 1.474, 1.469, 1.468,
+ 1.465, 1.465, 1.471, 1.478, 1.481, 1.484, 1.486, 1.488, 1.488, 1.487, 1.485, 1.482, 1.477, 1.472, 1.468, 1.467,
+ 1.465, 1.466, 1.472, 1.479, 1.482, 1.485, 1.486, 1.488, 1.488, 1.486, 1.484, 1.479, 1.475, 1.472, 1.468, 1.466,
+ 1.466, 1.466, 1.472, 1.478, 1.482, 1.484, 1.485, 1.488, 1.487, 1.485, 1.483, 1.479, 1.475, 1.472, 1.469, 1.468,
+ 1.465, 1.466, 1.469, 1.476, 1.481, 1.485, 1.485, 1.486, 1.486, 1.485, 1.483, 1.479, 1.477, 1.474, 1.471, 1.469,
+ 1.464, 1.465, 1.469, 1.476, 1.481, 1.484, 1.485, 1.487, 1.487, 1.486, 1.485, 1.481, 1.478, 1.475, 1.471, 1.469,
+ 1.463, 1.464, 1.469, 1.477, 1.481, 1.485, 1.485, 1.488, 1.488, 1.487, 1.486, 1.481, 1.478, 1.475, 1.471, 1.468,
+ 1.464, 1.465, 1.471, 1.478, 1.482, 1.486, 1.486, 1.488, 1.488, 1.487, 1.486, 1.481, 1.478, 1.475, 1.472, 1.468,
+ 1.465, 1.466, 1.472, 1.481, 1.483, 1.487, 1.487, 1.488, 1.488, 1.486, 1.485, 1.481, 1.479, 1.476, 1.473, 1.472
+ ]
+ },
+ {
+ "ct": 5930,
+ "table":
+ [
+ 1.443, 1.444, 1.448, 1.453, 1.459, 1.463, 1.465, 1.467, 1.469, 1.469, 1.467, 1.466, 1.462, 1.457, 1.454, 1.451,
+ 1.443, 1.444, 1.445, 1.451, 1.459, 1.463, 1.465, 1.467, 1.469, 1.469, 1.467, 1.465, 1.461, 1.456, 1.452, 1.451,
+ 1.444, 1.444, 1.445, 1.451, 1.459, 1.463, 1.466, 1.468, 1.469, 1.469, 1.467, 1.465, 1.461, 1.456, 1.452, 1.449,
+ 1.444, 1.444, 1.447, 1.452, 1.459, 1.464, 1.467, 1.469, 1.471, 1.469, 1.467, 1.466, 1.461, 1.456, 1.452, 1.449,
+ 1.444, 1.445, 1.448, 1.452, 1.459, 1.465, 1.469, 1.471, 1.471, 1.471, 1.468, 1.465, 1.461, 1.455, 1.451, 1.449,
+ 1.445, 1.446, 1.449, 1.453, 1.461, 1.466, 1.469, 1.471, 1.472, 1.469, 1.467, 1.465, 1.459, 1.455, 1.451, 1.447,
+ 1.446, 1.446, 1.449, 1.453, 1.461, 1.466, 1.469, 1.469, 1.469, 1.469, 1.467, 1.465, 1.459, 1.455, 1.452, 1.449,
+ 1.446, 1.446, 1.447, 1.451, 1.459, 1.466, 1.469, 1.469, 1.469, 1.469, 1.467, 1.465, 1.461, 1.457, 1.454, 1.451,
+ 1.444, 1.444, 1.447, 1.451, 1.459, 1.466, 1.469, 1.469, 1.471, 1.471, 1.468, 1.466, 1.462, 1.458, 1.454, 1.452,
+ 1.444, 1.444, 1.448, 1.453, 1.459, 1.466, 1.469, 1.471, 1.472, 1.472, 1.468, 1.466, 1.462, 1.458, 1.454, 1.449,
+ 1.446, 1.447, 1.449, 1.454, 1.461, 1.466, 1.471, 1.471, 1.471, 1.471, 1.468, 1.466, 1.462, 1.459, 1.455, 1.449,
+ 1.447, 1.447, 1.452, 1.457, 1.462, 1.468, 1.472, 1.472, 1.471, 1.471, 1.468, 1.466, 1.462, 1.459, 1.456, 1.455
+ ]
+ }
+ ],
+ "luminance_lut":
+ [
+ 1.548, 1.499, 1.387, 1.289, 1.223, 1.183, 1.164, 1.154, 1.153, 1.169, 1.211, 1.265, 1.345, 1.448, 1.581, 1.619,
+ 1.513, 1.412, 1.307, 1.228, 1.169, 1.129, 1.105, 1.098, 1.103, 1.127, 1.157, 1.209, 1.272, 1.361, 1.481, 1.583,
+ 1.449, 1.365, 1.257, 1.175, 1.124, 1.085, 1.062, 1.054, 1.059, 1.079, 1.113, 1.151, 1.211, 1.293, 1.407, 1.488,
+ 1.424, 1.324, 1.222, 1.139, 1.089, 1.056, 1.034, 1.031, 1.034, 1.049, 1.075, 1.115, 1.164, 1.241, 1.351, 1.446,
+ 1.412, 1.297, 1.203, 1.119, 1.069, 1.039, 1.021, 1.016, 1.022, 1.032, 1.052, 1.086, 1.135, 1.212, 1.321, 1.439,
+ 1.406, 1.287, 1.195, 1.115, 1.059, 1.028, 1.014, 1.012, 1.015, 1.026, 1.041, 1.074, 1.125, 1.201, 1.302, 1.425,
+ 1.406, 1.294, 1.205, 1.126, 1.062, 1.031, 1.013, 1.009, 1.011, 1.019, 1.042, 1.079, 1.129, 1.203, 1.302, 1.435,
+ 1.415, 1.318, 1.229, 1.146, 1.076, 1.039, 1.019, 1.014, 1.017, 1.031, 1.053, 1.093, 1.144, 1.219, 1.314, 1.436,
+ 1.435, 1.348, 1.246, 1.164, 1.094, 1.059, 1.036, 1.032, 1.037, 1.049, 1.072, 1.114, 1.167, 1.257, 1.343, 1.462,
+ 1.471, 1.385, 1.278, 1.189, 1.124, 1.084, 1.064, 1.061, 1.069, 1.078, 1.101, 1.146, 1.207, 1.298, 1.415, 1.496,
+ 1.522, 1.436, 1.323, 1.228, 1.169, 1.118, 1.101, 1.094, 1.099, 1.113, 1.146, 1.194, 1.265, 1.353, 1.474, 1.571,
+ 1.578, 1.506, 1.378, 1.281, 1.211, 1.156, 1.135, 1.134, 1.139, 1.158, 1.194, 1.251, 1.327, 1.427, 1.559, 1.611
+ ],
+ "sigma": 0.00121,
+ "sigma_Cb": 0.00115
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 2360,
+ "ccm":
+ [
+ 1.66078, -0.23588, -0.42491,
+ -0.47456, 1.82763, -0.35307,
+ -0.00545, -1.44729, 2.45273
+ ]
+ },
+ {
+ "ct": 2870,
+ "ccm":
+ [
+ 1.78373, -0.55344, -0.23029,
+ -0.39951, 1.69701, -0.29751,
+ 0.01986, -1.06525, 2.04539
+ ]
+ },
+ {
+ "ct": 2970,
+ "ccm":
+ [
+ 1.73511, -0.56973, -0.16537,
+ -0.36338, 1.69878, -0.33539,
+ -0.02354, -0.76813, 1.79168
+ ]
+ },
+ {
+ "ct": 3000,
+ "ccm":
+ [
+ 2.06374, -0.92218, -0.14156,
+ -0.41721, 1.69289, -0.27568,
+ -0.00554, -0.92741, 1.93295
+ ]
+ },
+ {
+ "ct": 3700,
+ "ccm":
+ [
+ 2.13792, -1.08136, -0.05655,
+ -0.34739, 1.58989, -0.24249,
+ -0.00349, -0.76789, 1.77138
+ ]
+ },
+ {
+ "ct": 3870,
+ "ccm":
+ [
+ 1.83834, -0.70528, -0.13307,
+ -0.30499, 1.60523, -0.30024,
+ -0.05701, -0.58313, 1.64014
+ ]
+ },
+ {
+ "ct": 4000,
+ "ccm":
+ [
+ 2.15741, -1.10295, -0.05447,
+ -0.34631, 1.61158, -0.26528,
+ -0.02723, -0.70288, 1.73011
+ ]
+ },
+ {
+ "ct": 4400,
+ "ccm":
+ [
+ 2.05729, -0.95007, -0.10723,
+ -0.41712, 1.78606, -0.36894,
+ -0.11899, -0.55727, 1.67626
+ ]
+ },
+ {
+ "ct": 4715,
+ "ccm":
+ [
+ 1.90255, -0.77478, -0.12777,
+ -0.31338, 1.88197, -0.56858,
+ -0.06001, -0.61785, 1.67786
+ ]
+ },
+ {
+ "ct": 5920,
+ "ccm":
+ [
+ 1.98691, -0.84671, -0.14019,
+ -0.26581, 1.70615, -0.44035,
+ -0.09532, -0.47332, 1.56864
+ ]
+ },
+ {
+ "ct": 9050,
+ "ccm":
+ [
+ 2.09255, -0.76541, -0.32714,
+ -0.28973, 2.27462, -0.98489,
+ -0.17299, -0.61275, 1.78574
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.sharpen": { }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/src/ipa/rpi/vc4/data/imx519.json b/src/ipa/rpi/vc4/data/imx519.json
new file mode 100644
index 00000000..ce194256
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/imx519.json
@@ -0,0 +1,427 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 4096
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 13841,
+ "reference_gain": 2.0,
+ "reference_aperture": 1.0,
+ "reference_lux": 900,
+ "reference_Y": 12064
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 0,
+ "reference_slope": 2.776
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 189,
+ "slope": 0.01495
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "priors": [
+ {
+ "lux": 0,
+ "prior":
+ [
+ 2000, 1.0,
+ 3000, 0.0,
+ 13000, 0.0
+ ]
+ },
+ {
+ "lux": 800,
+ "prior":
+ [
+ 2000, 0.0,
+ 6000, 2.0,
+ 13000, 2.0
+ ]
+ },
+ {
+ "lux": 1500,
+ "prior":
+ [
+ 2000, 0.0,
+ 4000, 1.0,
+ 6000, 6.0,
+ 6500, 7.0,
+ 7000, 1.0,
+ 13000, 1.0
+ ]
+ }
+ ],
+ "modes":
+ {
+ "auto":
+ {
+ "lo": 2500,
+ "hi": 7900
+ },
+ "incandescent":
+ {
+ "lo": 2500,
+ "hi": 3000
+ },
+ "tungsten":
+ {
+ "lo": 3000,
+ "hi": 3500
+ },
+ "fluorescent":
+ {
+ "lo": 4000,
+ "hi": 4700
+ },
+ "indoor":
+ {
+ "lo": 3000,
+ "hi": 5000
+ },
+ "daylight":
+ {
+ "lo": 5500,
+ "hi": 6500
+ },
+ "cloudy":
+ {
+ "lo": 7000,
+ "hi": 8000
+ }
+ },
+ "bayes": 1,
+ "ct_curve":
+ [
+ 2890.0, 0.7328, 0.3734,
+ 3550.0, 0.6228, 0.4763,
+ 4500.0, 0.5208, 0.5825,
+ 5700.0, 0.4467, 0.6671,
+ 7900.0, 0.3858, 0.7411
+ ],
+ "sensitivity_r": 1.0,
+ "sensitivity_b": 1.0,
+ "transverse_pos": 0.02027,
+ "transverse_neg": 0.01935
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.5,
+ "calibrations_Cr": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 1.527, 1.521, 1.508, 1.493, 1.476, 1.455, 1.442, 1.441, 1.441, 1.441, 1.448, 1.467, 1.483, 1.494, 1.503, 1.504,
+ 1.525, 1.513, 1.496, 1.477, 1.461, 1.434, 1.418, 1.409, 1.409, 1.416, 1.429, 1.449, 1.469, 1.485, 1.495, 1.503,
+ 1.517, 1.506, 1.485, 1.461, 1.434, 1.412, 1.388, 1.376, 1.376, 1.386, 1.405, 1.429, 1.449, 1.471, 1.488, 1.495,
+ 1.512, 1.496, 1.471, 1.442, 1.412, 1.388, 1.361, 1.344, 1.344, 1.358, 1.384, 1.405, 1.431, 1.456, 1.479, 1.489,
+ 1.508, 1.488, 1.458, 1.425, 1.393, 1.361, 1.343, 1.322, 1.321, 1.342, 1.358, 1.385, 1.416, 1.445, 1.471, 1.484,
+ 1.507, 1.482, 1.453, 1.418, 1.382, 1.349, 1.322, 1.318, 1.318, 1.321, 1.345, 1.373, 1.405, 1.437, 1.465, 1.483,
+ 1.507, 1.482, 1.453, 1.418, 1.382, 1.349, 1.322, 1.313, 1.313, 1.321, 1.345, 1.373, 1.405, 1.437, 1.465, 1.483,
+ 1.507, 1.485, 1.455, 1.422, 1.387, 1.355, 1.333, 1.319, 1.321, 1.333, 1.351, 1.381, 1.411, 1.441, 1.467, 1.483,
+ 1.508, 1.489, 1.463, 1.432, 1.401, 1.372, 1.355, 1.333, 1.333, 1.351, 1.369, 1.393, 1.422, 1.448, 1.471, 1.484,
+ 1.511, 1.494, 1.472, 1.444, 1.416, 1.398, 1.372, 1.361, 1.361, 1.369, 1.393, 1.411, 1.436, 1.458, 1.477, 1.487,
+ 1.511, 1.496, 1.478, 1.455, 1.436, 1.416, 1.399, 1.391, 1.391, 1.397, 1.411, 1.429, 1.451, 1.466, 1.479, 1.487,
+ 1.511, 1.495, 1.478, 1.462, 1.448, 1.432, 1.419, 1.419, 1.419, 1.419, 1.429, 1.445, 1.459, 1.471, 1.482, 1.487
+ ]
+ },
+ {
+ "ct": 6000,
+ "table":
+ [
+ 2.581, 2.573, 2.558, 2.539, 2.514, 2.487, 2.473, 2.471, 2.471, 2.471, 2.479, 2.499, 2.517, 2.532, 2.543, 2.544,
+ 2.575, 2.559, 2.539, 2.521, 2.491, 2.458, 2.435, 2.421, 2.421, 2.429, 2.449, 2.477, 2.499, 2.519, 2.534, 2.543,
+ 2.561, 2.549, 2.521, 2.491, 2.457, 2.423, 2.393, 2.375, 2.375, 2.387, 2.412, 2.444, 2.475, 2.499, 2.519, 2.532,
+ 2.552, 2.531, 2.498, 2.459, 2.423, 2.391, 2.349, 2.325, 2.325, 2.344, 2.374, 2.412, 2.444, 2.476, 2.505, 2.519,
+ 2.543, 2.518, 2.479, 2.435, 2.392, 2.349, 2.324, 2.285, 2.283, 2.313, 2.344, 2.374, 2.417, 2.457, 2.489, 2.506,
+ 2.541, 2.511, 2.469, 2.421, 2.372, 2.326, 2.284, 2.277, 2.279, 2.283, 2.313, 2.357, 2.401, 2.443, 2.479, 2.504,
+ 2.541, 2.511, 2.469, 2.421, 2.372, 2.326, 2.284, 2.267, 2.267, 2.281, 2.313, 2.357, 2.401, 2.443, 2.479, 2.504,
+ 2.541, 2.512, 2.472, 2.425, 2.381, 2.338, 2.302, 2.278, 2.279, 2.301, 2.324, 2.364, 2.407, 2.447, 2.481, 2.504,
+ 2.544, 2.519, 2.483, 2.441, 2.401, 2.363, 2.338, 2.302, 2.302, 2.324, 2.355, 2.385, 2.423, 2.459, 2.488, 2.506,
+ 2.549, 2.527, 2.497, 2.463, 2.427, 2.401, 2.363, 2.345, 2.345, 2.355, 2.385, 2.412, 2.444, 2.473, 2.497, 2.509,
+ 2.552, 2.532, 2.507, 2.481, 2.459, 2.427, 2.402, 2.389, 2.389, 2.394, 2.412, 2.444, 2.465, 2.481, 2.499, 2.511,
+ 2.553, 2.533, 2.508, 2.489, 2.475, 2.454, 2.429, 2.429, 2.429, 2.429, 2.439, 2.463, 2.481, 2.492, 2.504, 2.511
+ ]
+ }
+ ],
+ "calibrations_Cb": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 3.132, 3.126, 3.116, 3.103, 3.097, 3.091, 3.087, 3.086, 3.088, 3.091, 3.092, 3.102, 3.113, 3.121, 3.141, 3.144,
+ 3.149, 3.132, 3.123, 3.108, 3.101, 3.096, 3.091, 3.089, 3.091, 3.092, 3.101, 3.107, 3.116, 3.129, 3.144, 3.153,
+ 3.161, 3.149, 3.129, 3.121, 3.108, 3.103, 3.101, 3.101, 3.101, 3.103, 3.107, 3.116, 3.125, 3.134, 3.153, 3.159,
+ 3.176, 3.161, 3.144, 3.129, 3.124, 3.121, 3.117, 3.118, 3.118, 3.119, 3.122, 3.125, 3.134, 3.146, 3.159, 3.171,
+ 3.183, 3.176, 3.157, 3.144, 3.143, 3.143, 3.139, 3.141, 3.141, 3.141, 3.141, 3.141, 3.146, 3.161, 3.171, 3.179,
+ 3.189, 3.183, 3.165, 3.157, 3.156, 3.157, 3.159, 3.163, 3.163, 3.163, 3.163, 3.161, 3.163, 3.169, 3.179, 3.187,
+ 3.199, 3.189, 3.171, 3.165, 3.164, 3.167, 3.171, 3.173, 3.173, 3.172, 3.171, 3.169, 3.169, 3.175, 3.187, 3.189,
+ 3.206, 3.196, 3.177, 3.171, 3.165, 3.167, 3.171, 3.173, 3.173, 3.172, 3.171, 3.171, 3.173, 3.177, 3.192, 3.194,
+ 3.209, 3.197, 3.178, 3.171, 3.164, 3.161, 3.159, 3.161, 3.162, 3.164, 3.167, 3.171, 3.173, 3.181, 3.193, 3.198,
+ 3.204, 3.194, 3.176, 3.165, 3.161, 3.156, 3.154, 3.154, 3.159, 3.161, 3.164, 3.168, 3.173, 3.182, 3.198, 3.199,
+ 3.199, 3.191, 3.176, 3.169, 3.161, 3.157, 3.153, 3.153, 3.156, 3.161, 3.164, 3.168, 3.173, 3.186, 3.196, 3.199,
+ 3.199, 3.188, 3.179, 3.173, 3.165, 3.157, 3.153, 3.154, 3.156, 3.159, 3.167, 3.171, 3.176, 3.185, 3.193, 3.198
+ ]
+ },
+ {
+ "ct": 6000,
+ "table":
+ [
+ 1.579, 1.579, 1.577, 1.574, 1.573, 1.571, 1.571, 1.571, 1.571, 1.569, 1.569, 1.571, 1.572, 1.574, 1.577, 1.578,
+ 1.584, 1.579, 1.578, 1.575, 1.573, 1.572, 1.571, 1.572, 1.572, 1.571, 1.571, 1.572, 1.573, 1.576, 1.578, 1.579,
+ 1.587, 1.584, 1.579, 1.578, 1.575, 1.573, 1.573, 1.575, 1.575, 1.574, 1.573, 1.574, 1.576, 1.578, 1.581, 1.581,
+ 1.591, 1.587, 1.584, 1.579, 1.578, 1.579, 1.579, 1.581, 1.581, 1.581, 1.578, 1.577, 1.578, 1.581, 1.585, 1.586,
+ 1.595, 1.591, 1.587, 1.585, 1.585, 1.586, 1.587, 1.587, 1.588, 1.588, 1.585, 1.584, 1.584, 1.586, 1.589, 1.589,
+ 1.597, 1.595, 1.591, 1.589, 1.591, 1.593, 1.595, 1.596, 1.597, 1.597, 1.595, 1.594, 1.592, 1.592, 1.593, 1.593,
+ 1.601, 1.597, 1.593, 1.592, 1.593, 1.595, 1.598, 1.599, 1.602, 1.601, 1.598, 1.596, 1.595, 1.596, 1.595, 1.595,
+ 1.601, 1.599, 1.594, 1.593, 1.593, 1.595, 1.598, 1.599, 1.602, 1.601, 1.598, 1.597, 1.597, 1.597, 1.597, 1.597,
+ 1.602, 1.599, 1.594, 1.593, 1.592, 1.593, 1.595, 1.597, 1.597, 1.598, 1.598, 1.597, 1.597, 1.597, 1.598, 1.598,
+ 1.599, 1.598, 1.594, 1.592, 1.591, 1.591, 1.592, 1.595, 1.596, 1.597, 1.597, 1.597, 1.597, 1.599, 1.599, 1.599,
+ 1.598, 1.596, 1.594, 1.593, 1.592, 1.592, 1.592, 1.594, 1.595, 1.597, 1.597, 1.597, 1.598, 1.599, 1.599, 1.599,
+ 1.597, 1.595, 1.594, 1.594, 1.593, 1.592, 1.593, 1.595, 1.595, 1.597, 1.598, 1.598, 1.598, 1.599, 1.599, 1.599
+ ]
+ }
+ ],
+ "luminance_lut":
+ [
+ 2.887, 2.754, 2.381, 2.105, 1.859, 1.678, 1.625, 1.623, 1.623, 1.624, 1.669, 1.849, 2.092, 2.362, 2.723, 2.838,
+ 2.754, 2.443, 2.111, 1.905, 1.678, 1.542, 1.455, 1.412, 1.412, 1.452, 1.535, 1.665, 1.893, 2.096, 2.413, 2.723,
+ 2.443, 2.216, 1.911, 1.678, 1.537, 1.372, 1.288, 1.245, 1.245, 1.283, 1.363, 1.527, 1.665, 1.895, 2.193, 2.413,
+ 2.318, 2.057, 1.764, 1.541, 1.372, 1.282, 1.159, 1.113, 1.113, 1.151, 1.269, 1.363, 1.527, 1.749, 2.034, 2.278,
+ 2.259, 1.953, 1.671, 1.452, 1.283, 1.159, 1.107, 1.018, 1.017, 1.097, 1.151, 1.269, 1.437, 1.655, 1.931, 2.222,
+ 2.257, 1.902, 1.624, 1.408, 1.239, 1.111, 1.019, 1.011, 1.005, 1.014, 1.098, 1.227, 1.395, 1.608, 1.883, 2.222,
+ 2.257, 1.902, 1.624, 1.408, 1.239, 1.111, 1.016, 1.001, 1.001, 1.007, 1.098, 1.227, 1.395, 1.608, 1.883, 2.222,
+ 2.257, 1.946, 1.666, 1.448, 1.281, 1.153, 1.093, 1.013, 1.008, 1.089, 1.143, 1.269, 1.437, 1.654, 1.934, 2.226,
+ 2.309, 2.044, 1.756, 1.532, 1.363, 1.259, 1.153, 1.093, 1.093, 1.143, 1.264, 1.354, 1.524, 1.746, 2.035, 2.284,
+ 2.425, 2.201, 1.896, 1.662, 1.519, 1.363, 1.259, 1.214, 1.214, 1.264, 1.354, 1.519, 1.655, 1.888, 2.191, 2.413,
+ 2.724, 2.417, 2.091, 1.888, 1.662, 1.519, 1.419, 1.373, 1.373, 1.425, 1.521, 1.655, 1.885, 2.089, 2.409, 2.722,
+ 2.858, 2.724, 2.356, 2.085, 1.842, 1.658, 1.581, 1.577, 1.577, 1.579, 1.653, 1.838, 2.084, 2.359, 2.722, 2.842
+ ],
+ "sigma": 0.00372,
+ "sigma_Cb": 0.00244
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 2890,
+ "ccm":
+ [
+ 1.36754, -0.18448, -0.18306,
+ -0.32356, 1.44826, -0.12471,
+ -0.00412, -0.69936, 1.70348
+ ]
+ },
+ {
+ "ct": 2920,
+ "ccm":
+ [
+ 1.26704, 0.01624, -0.28328,
+ -0.28516, 1.38934, -0.10419,
+ -0.04854, -0.82211, 1.87066
+ ]
+ },
+ {
+ "ct": 3550,
+ "ccm":
+ [
+ 1.42836, -0.27235, -0.15601,
+ -0.28751, 1.41075, -0.12325,
+ -0.01812, -0.54849, 1.56661
+ ]
+ },
+ {
+ "ct": 4500,
+ "ccm":
+ [
+ 1.36328, -0.19569, -0.16759,
+ -0.25254, 1.52248, -0.26994,
+ -0.01575, -0.53155, 1.54729
+ ]
+ },
+ {
+ "ct": 5700,
+ "ccm":
+ [
+ 1.49207, -0.37245, -0.11963,
+ -0.21493, 1.40005, -0.18512,
+ -0.03781, -0.38779, 1.42561
+ ]
+ },
+ {
+ "ct": 7900,
+ "ccm":
+ [
+ 1.34849, -0.05425, -0.29424,
+ -0.22182, 1.77684, -0.55502,
+ -0.07403, -0.55336, 1.62739
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.sharpen": { }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/src/ipa/rpi/vc4/data/imx708.json b/src/ipa/rpi/vc4/data/imx708.json
new file mode 100644
index 00000000..4de6f079
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/imx708.json
@@ -0,0 +1,671 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 4096
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 10672,
+ "reference_gain": 1.12,
+ "reference_aperture": 1.0,
+ "reference_lux": 977,
+ "reference_Y": 8627
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 16.0,
+ "reference_slope": 4.0
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 215,
+ "slope": 0.00287
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "priors": [
+ {
+ "lux": 0,
+ "prior":
+ [
+ 2000, 1.0,
+ 3000, 0.0,
+ 13000, 0.0
+ ]
+ },
+ {
+ "lux": 800,
+ "prior":
+ [
+ 2000, 0.0,
+ 6000, 2.0,
+ 13000, 2.0
+ ]
+ },
+ {
+ "lux": 1500,
+ "prior":
+ [
+ 2000, 0.0,
+ 4000, 1.0,
+ 6000, 6.0,
+ 6500, 7.0,
+ 7000, 1.0,
+ 13000, 1.0
+ ]
+ }
+ ],
+ "modes":
+ {
+ "auto":
+ {
+ "lo": 2500,
+ "hi": 8000
+ },
+ "incandescent":
+ {
+ "lo": 2500,
+ "hi": 3000
+ },
+ "tungsten":
+ {
+ "lo": 3000,
+ "hi": 3500
+ },
+ "fluorescent":
+ {
+ "lo": 4000,
+ "hi": 4700
+ },
+ "indoor":
+ {
+ "lo": 3000,
+ "hi": 5000
+ },
+ "daylight":
+ {
+ "lo": 5500,
+ "hi": 6500
+ },
+ "cloudy":
+ {
+ "lo": 7000,
+ "hi": 8600
+ }
+ },
+ "bayes": 1,
+ "ct_curve":
+ [
+ 2498.0, 0.8733, 0.2606,
+ 2821.0, 0.7707, 0.3245,
+ 2925.0, 0.7338, 0.3499,
+ 2926.0, 0.7193, 0.3603,
+ 2951.0, 0.7144, 0.3639,
+ 2954.0, 0.7111, 0.3663,
+ 3578.0, 0.6038, 0.4516,
+ 3717.0, 0.5861, 0.4669,
+ 3784.0, 0.5786, 0.4737,
+ 4485.0, 0.5113, 0.5368,
+ 4615.0, 0.4994, 0.5486,
+ 4671.0, 0.4927, 0.5554,
+ 5753.0, 0.4274, 0.6246,
+ 5773.0, 0.4265, 0.6256,
+ 7433.0, 0.3723, 0.6881
+ ],
+ "sensitivity_r": 1.05,
+ "sensitivity_b": 1.05,
+ "transverse_pos": 0.03148,
+ "transverse_neg": 0.03061
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "channels": [
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 15000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ],
+ "startup_frames": 5,
+ "convergence_frames": 6,
+ "speed": 0.15
+ },
+ {
+ "base_ev": 0.125,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 15000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ],
+ "startup_frames": 5,
+ "convergence_frames": 6,
+ "speed": 0.15
+ },
+ {
+ "base_ev": 1.5,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 15000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ],
+ "startup_frames": 5,
+ "convergence_frames": 6,
+ "speed": 0.15
+ }
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.5,
+ "calibrations_Cr": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 1.562, 1.566, 1.566, 1.556, 1.533, 1.506, 1.475, 1.475, 1.475, 1.475, 1.506, 1.533, 1.555, 1.563, 1.562, 1.555,
+ 1.563, 1.564, 1.561, 1.538, 1.508, 1.482, 1.449, 1.436, 1.436, 1.449, 1.481, 1.508, 1.537, 1.557, 1.558, 1.557,
+ 1.564, 1.563, 1.554, 1.522, 1.482, 1.449, 1.421, 1.403, 1.403, 1.419, 1.449, 1.481, 1.519, 1.549, 1.557, 1.559,
+ 1.564, 1.563, 1.545, 1.506, 1.462, 1.421, 1.403, 1.378, 1.378, 1.402, 1.419, 1.459, 1.503, 1.541, 1.557, 1.559,
+ 1.564, 1.562, 1.537, 1.494, 1.447, 1.404, 1.378, 1.364, 1.364, 1.377, 1.402, 1.444, 1.491, 1.532, 1.556, 1.559,
+ 1.564, 1.559, 1.532, 1.487, 1.438, 1.395, 1.365, 1.359, 1.359, 1.364, 1.393, 1.436, 1.484, 1.527, 1.555, 1.558,
+ 1.564, 1.559, 1.532, 1.487, 1.438, 1.395, 1.365, 1.356, 1.356, 1.364, 1.393, 1.436, 1.484, 1.527, 1.554, 1.557,
+ 1.564, 1.561, 1.536, 1.492, 1.444, 1.402, 1.374, 1.364, 1.363, 1.373, 1.401, 1.442, 1.489, 1.531, 1.554, 1.557,
+ 1.564, 1.563, 1.544, 1.504, 1.458, 1.418, 1.397, 1.374, 1.374, 1.395, 1.416, 1.456, 1.501, 1.538, 1.556, 1.557,
+ 1.564, 1.562, 1.551, 1.518, 1.477, 1.441, 1.418, 1.397, 1.397, 1.416, 1.438, 1.474, 1.514, 1.546, 1.556, 1.556,
+ 1.562, 1.562, 1.558, 1.534, 1.499, 1.476, 1.441, 1.426, 1.426, 1.438, 1.473, 1.496, 1.531, 1.552, 1.556, 1.555,
+ 1.561, 1.564, 1.564, 1.552, 1.525, 1.497, 1.466, 1.461, 1.461, 1.464, 1.495, 1.523, 1.548, 1.556, 1.556, 1.552
+ ]
+ },
+ {
+ "ct": 5000,
+ "table":
+ [
+ 2.609, 2.616, 2.617, 2.607, 2.573, 2.527, 2.483, 2.481, 2.481, 2.483, 2.529, 2.573, 2.604, 2.613, 2.613, 2.604,
+ 2.609, 2.615, 2.608, 2.576, 2.533, 2.489, 2.439, 2.418, 2.418, 2.439, 2.491, 2.532, 2.577, 2.605, 2.609, 2.607,
+ 2.611, 2.611, 2.597, 2.551, 2.489, 2.439, 2.391, 2.364, 2.364, 2.391, 2.439, 2.491, 2.551, 2.592, 2.607, 2.609,
+ 2.612, 2.608, 2.583, 2.526, 2.457, 2.391, 2.362, 2.318, 2.318, 2.362, 2.391, 2.458, 2.526, 2.581, 2.607, 2.611,
+ 2.612, 2.604, 2.571, 2.507, 2.435, 2.362, 2.317, 2.293, 2.294, 2.318, 2.363, 2.434, 2.508, 2.568, 2.604, 2.612,
+ 2.611, 2.602, 2.564, 2.496, 2.419, 2.349, 2.293, 2.284, 2.284, 2.294, 2.347, 2.421, 2.497, 2.562, 2.603, 2.611,
+ 2.609, 2.601, 2.564, 2.496, 2.419, 2.349, 2.293, 2.278, 2.278, 2.294, 2.347, 2.421, 2.497, 2.562, 2.602, 2.609,
+ 2.609, 2.602, 2.568, 2.503, 2.429, 2.361, 2.311, 2.292, 2.292, 2.309, 2.357, 2.429, 2.504, 2.567, 2.602, 2.609,
+ 2.606, 2.604, 2.579, 2.519, 2.449, 2.384, 2.348, 2.311, 2.311, 2.346, 2.383, 2.449, 2.521, 2.577, 2.604, 2.608,
+ 2.604, 2.603, 2.586, 2.537, 2.474, 2.418, 2.384, 2.348, 2.348, 2.383, 2.417, 2.476, 2.538, 2.586, 2.601, 2.603,
+ 2.603, 2.605, 2.596, 2.561, 2.508, 2.474, 2.418, 2.396, 2.396, 2.417, 2.474, 2.511, 2.562, 2.596, 2.603, 2.602,
+ 2.601, 2.607, 2.606, 2.589, 2.549, 2.507, 2.456, 2.454, 2.454, 2.458, 2.508, 2.554, 2.594, 2.605, 2.605, 2.602
+ ]
+ }
+ ],
+ "calibrations_Cb": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 3.221, 3.226, 3.231, 3.236, 3.239, 3.243, 3.245, 3.247, 3.249, 3.253, 3.255, 3.254, 3.253, 3.242, 3.235, 3.226,
+ 3.225, 3.231, 3.235, 3.238, 3.241, 3.244, 3.246, 3.247, 3.249, 3.254, 3.256, 3.255, 3.252, 3.248, 3.241, 3.232,
+ 3.226, 3.234, 3.239, 3.243, 3.243, 3.245, 3.247, 3.248, 3.251, 3.255, 3.256, 3.256, 3.254, 3.249, 3.244, 3.236,
+ 3.232, 3.238, 3.245, 3.245, 3.246, 3.247, 3.248, 3.251, 3.251, 3.256, 3.257, 3.257, 3.256, 3.254, 3.249, 3.239,
+ 3.232, 3.243, 3.246, 3.246, 3.246, 3.247, 3.248, 3.251, 3.253, 3.257, 3.258, 3.258, 3.257, 3.256, 3.254, 3.239,
+ 3.232, 3.242, 3.246, 3.247, 3.246, 3.246, 3.248, 3.251, 3.252, 3.253, 3.256, 3.255, 3.255, 3.254, 3.251, 3.239,
+ 3.233, 3.241, 3.244, 3.245, 3.244, 3.245, 3.246, 3.249, 3.251, 3.252, 3.253, 3.252, 3.252, 3.252, 3.249, 3.238,
+ 3.238, 3.241, 3.246, 3.246, 3.245, 3.245, 3.247, 3.249, 3.251, 3.252, 3.253, 3.253, 3.252, 3.252, 3.249, 3.239,
+ 3.235, 3.241, 3.245, 3.245, 3.245, 3.245, 3.246, 3.247, 3.251, 3.254, 3.253, 3.255, 3.256, 3.255, 3.251, 3.241,
+ 3.226, 3.235, 3.241, 3.241, 3.241, 3.241, 3.243, 3.245, 3.246, 3.252, 3.253, 3.254, 3.256, 3.254, 3.241, 3.237,
+ 3.205, 3.213, 3.213, 3.214, 3.214, 3.214, 3.214, 3.213, 3.213, 3.216, 3.218, 3.216, 3.214, 3.213, 3.211, 3.208,
+ 3.205, 3.205, 3.212, 3.212, 3.212, 3.213, 3.211, 3.211, 3.211, 3.213, 3.216, 3.214, 3.213, 3.211, 3.208, 3.196
+ ]
+ },
+ {
+ "ct": 5000,
+ "table":
+ [
+ 1.645, 1.646, 1.649, 1.653, 1.654, 1.657, 1.659, 1.661, 1.663, 1.662, 1.661, 1.659, 1.656, 1.651, 1.645, 1.642,
+ 1.646, 1.649, 1.652, 1.654, 1.656, 1.659, 1.662, 1.663, 1.664, 1.664, 1.662, 1.661, 1.657, 1.653, 1.649, 1.644,
+ 1.648, 1.652, 1.654, 1.656, 1.658, 1.662, 1.665, 1.668, 1.668, 1.668, 1.665, 1.662, 1.658, 1.655, 1.652, 1.646,
+ 1.649, 1.653, 1.656, 1.658, 1.661, 1.665, 1.667, 1.671, 1.673, 1.671, 1.668, 1.663, 1.659, 1.656, 1.654, 1.647,
+ 1.649, 1.655, 1.657, 1.659, 1.661, 1.666, 1.671, 1.674, 1.675, 1.673, 1.671, 1.664, 1.659, 1.656, 1.654, 1.648,
+ 1.649, 1.654, 1.656, 1.659, 1.661, 1.666, 1.673, 1.676, 1.676, 1.675, 1.671, 1.664, 1.659, 1.656, 1.654, 1.648,
+ 1.649, 1.654, 1.656, 1.658, 1.659, 1.665, 1.672, 1.675, 1.675, 1.674, 1.668, 1.662, 1.658, 1.655, 1.654, 1.646,
+ 1.652, 1.655, 1.657, 1.659, 1.661, 1.665, 1.671, 1.673, 1.673, 1.672, 1.668, 1.662, 1.658, 1.655, 1.654, 1.647,
+ 1.652, 1.655, 1.657, 1.659, 1.661, 1.664, 1.667, 1.671, 1.672, 1.668, 1.666, 1.662, 1.659, 1.656, 1.654, 1.647,
+ 1.647, 1.652, 1.655, 1.656, 1.657, 1.661, 1.664, 1.665, 1.665, 1.665, 1.663, 1.661, 1.657, 1.655, 1.647, 1.647,
+ 1.639, 1.642, 1.644, 1.645, 1.646, 1.648, 1.648, 1.648, 1.649, 1.649, 1.649, 1.646, 1.645, 1.642, 1.639, 1.636,
+ 1.639, 1.641, 1.642, 1.644, 1.645, 1.646, 1.647, 1.647, 1.648, 1.648, 1.647, 1.645, 1.642, 1.639, 1.636, 1.633
+ ]
+ }
+ ],
+ "luminance_lut":
+ [
+ 2.644, 2.396, 2.077, 1.863, 1.682, 1.535, 1.392, 1.382, 1.382, 1.382, 1.515, 1.657, 1.826, 2.035, 2.351, 2.604,
+ 2.497, 2.229, 1.947, 1.733, 1.539, 1.424, 1.296, 1.249, 1.249, 1.285, 1.401, 1.519, 1.699, 1.908, 2.183, 2.456,
+ 2.389, 2.109, 1.848, 1.622, 1.424, 1.296, 1.201, 1.146, 1.146, 1.188, 1.285, 1.401, 1.591, 1.811, 2.065, 2.347,
+ 2.317, 2.026, 1.771, 1.535, 1.339, 1.201, 1.145, 1.069, 1.069, 1.134, 1.188, 1.318, 1.505, 1.734, 1.983, 2.273,
+ 2.276, 1.972, 1.715, 1.474, 1.281, 1.148, 1.069, 1.033, 1.024, 1.065, 1.134, 1.262, 1.446, 1.679, 1.929, 2.233,
+ 2.268, 1.941, 1.682, 1.441, 1.251, 1.119, 1.033, 1.013, 1.013, 1.024, 1.105, 1.231, 1.415, 1.649, 1.898, 2.227,
+ 2.268, 1.941, 1.682, 1.441, 1.251, 1.119, 1.033, 1.001, 1.001, 1.024, 1.105, 1.231, 1.415, 1.649, 1.898, 2.227,
+ 2.268, 1.951, 1.694, 1.456, 1.265, 1.131, 1.044, 1.026, 1.019, 1.039, 1.118, 1.246, 1.429, 1.663, 1.912, 2.227,
+ 2.291, 1.992, 1.738, 1.505, 1.311, 1.175, 1.108, 1.044, 1.041, 1.106, 1.161, 1.292, 1.478, 1.707, 1.955, 2.252,
+ 2.347, 2.058, 1.803, 1.581, 1.384, 1.245, 1.175, 1.108, 1.108, 1.161, 1.239, 1.364, 1.551, 1.773, 2.023, 2.311,
+ 2.438, 2.156, 1.884, 1.674, 1.484, 1.373, 1.245, 1.199, 1.199, 1.239, 1.363, 1.463, 1.647, 1.858, 2.123, 2.406,
+ 2.563, 2.305, 1.998, 1.792, 1.615, 1.472, 1.339, 1.322, 1.322, 1.326, 1.456, 1.593, 1.767, 1.973, 2.273, 2.532
+ ],
+ "sigma": 0.00178,
+ "sigma_Cb": 0.00217
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 2964,
+ "ccm":
+ [
+ 1.72129, -0.45961, -0.26169,
+ -0.30042, 1.56924, -0.26882,
+ 0.15133, -1.13293, 1.98161
+ ]
+ },
+ {
+ "ct": 3610,
+ "ccm":
+ [
+ 1.54474, -0.35082, -0.19391,
+ -0.36989, 1.67926, -0.30936,
+ -0.00524, -0.55197, 1.55722
+ ]
+ },
+ {
+ "ct": 4640,
+ "ccm":
+ [
+ 1.52972, -0.35168, -0.17804,
+ -0.28309, 1.67098, -0.38788,
+ 0.01695, -0.57209, 1.55515
+ ]
+ },
+ {
+ "ct": 5910,
+ "ccm":
+ [
+ 1.56879, -0.42159, -0.14719,
+ -0.27275, 1.59354, -0.32079,
+ -0.02862, -0.40662, 1.43525
+ ]
+ },
+ {
+ "ct": 7590,
+ "ccm":
+ [
+ 1.41424, -0.21092, -0.20332,
+ -0.17646, 1.71734, -0.54087,
+ 0.01297, -0.63111, 1.61814
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.sharpen": { }
+ },
+ {
+ "rpi.af":
+ {
+ "ranges":
+ {
+ "normal":
+ {
+ "min": 0.0,
+ "max": 12.0,
+ "default": 1.0
+ },
+ "macro":
+ {
+ "min": 3.0,
+ "max": 15.0,
+ "default": 4.0
+ }
+ },
+ "speeds":
+ {
+ "normal":
+ {
+ "step_coarse": 1.0,
+ "step_fine": 0.25,
+ "contrast_ratio": 0.75,
+ "pdaf_gain": -0.02,
+ "pdaf_squelch": 0.125,
+ "max_slew": 2.0,
+ "pdaf_frames": 20,
+ "dropout_frames": 6,
+ "step_frames": 4
+ }
+ },
+ "conf_epsilon": 8,
+ "conf_thresh": 16,
+ "conf_clip": 512,
+ "skip_frames": 5,
+ "map": [ 0.0, 445, 15.0, 925 ]
+ }
+ },
+ {
+ "rpi.hdr":
+ {
+ "MultiExposureUnmerged":
+ {
+ "cadence": [ 1, 2 ],
+ "channel_map":
+ {
+ "short": 1,
+ "long": 2
+ }
+ }
+ }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/src/ipa/rpi/vc4/data/imx708_noir.json b/src/ipa/rpi/vc4/data/imx708_noir.json
new file mode 100644
index 00000000..7b7ee874
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/imx708_noir.json
@@ -0,0 +1,770 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 4096
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 10672,
+ "reference_gain": 1.12,
+ "reference_aperture": 1.0,
+ "reference_lux": 977,
+ "reference_Y": 8627
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 16.0,
+ "reference_slope": 4.0
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 215,
+ "slope": 0.00287
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "priors": [
+ {
+ "lux": 0,
+ "prior":
+ [
+ 2000, 1.0,
+ 3000, 0.0,
+ 13000, 0.0
+ ]
+ },
+ {
+ "lux": 800,
+ "prior":
+ [
+ 2000, 0.0,
+ 6000, 2.0,
+ 13000, 2.0
+ ]
+ },
+ {
+ "lux": 1500,
+ "prior":
+ [
+ 2000, 0.0,
+ 4000, 1.0,
+ 6000, 6.0,
+ 6500, 7.0,
+ 7000, 1.0,
+ 13000, 1.0
+ ]
+ }
+ ],
+ "modes":
+ {
+ "auto":
+ {
+ "lo": 2500,
+ "hi": 8000
+ },
+ "incandescent":
+ {
+ "lo": 2500,
+ "hi": 3000
+ },
+ "tungsten":
+ {
+ "lo": 3000,
+ "hi": 3500
+ },
+ "fluorescent":
+ {
+ "lo": 4000,
+ "hi": 4700
+ },
+ "indoor":
+ {
+ "lo": 3000,
+ "hi": 5000
+ },
+ "daylight":
+ {
+ "lo": 5500,
+ "hi": 6500
+ },
+ "cloudy":
+ {
+ "lo": 7000,
+ "hi": 8600
+ }
+ },
+ "bayes": 0,
+ "ct_curve":
+ [
+ 2498.0, 0.8733, 0.2606,
+ 2821.0, 0.7707, 0.3245,
+ 2925.0, 0.7338, 0.3499,
+ 2926.0, 0.7193, 0.3603,
+ 2951.0, 0.7144, 0.3639,
+ 2954.0, 0.7111, 0.3663,
+ 3578.0, 0.6038, 0.4516,
+ 3717.0, 0.5861, 0.4669,
+ 3784.0, 0.5786, 0.4737,
+ 4485.0, 0.5113, 0.5368,
+ 4615.0, 0.4994, 0.5486,
+ 4671.0, 0.4927, 0.5554,
+ 5753.0, 0.4274, 0.6246,
+ 5773.0, 0.4265, 0.6256,
+ 7433.0, 0.3723, 0.6881
+ ],
+ "sensitivity_r": 1.05,
+ "sensitivity_b": 1.05,
+ "transverse_pos": 0.03148,
+ "transverse_neg": 0.03061
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "channels": [
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 15000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ],
+ "startup_frames": 5,
+ "convergence_frames": 6,
+ "speed": 0.15
+ },
+ {
+ "base_ev": 0.125,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 15000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ],
+ "startup_frames": 5,
+ "convergence_frames": 6,
+ "speed": 0.15
+ },
+ {
+ "base_ev": 1.5,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 15000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ],
+ "startup_frames": 5,
+ "convergence_frames": 6,
+ "speed": 0.15
+ }
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.5,
+ "calibrations_Cr": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 1.562, 1.566, 1.566, 1.556, 1.533, 1.506, 1.475, 1.475, 1.475, 1.475, 1.506, 1.533, 1.555, 1.563, 1.562, 1.555,
+ 1.563, 1.564, 1.561, 1.538, 1.508, 1.482, 1.449, 1.436, 1.436, 1.449, 1.481, 1.508, 1.537, 1.557, 1.558, 1.557,
+ 1.564, 1.563, 1.554, 1.522, 1.482, 1.449, 1.421, 1.403, 1.403, 1.419, 1.449, 1.481, 1.519, 1.549, 1.557, 1.559,
+ 1.564, 1.563, 1.545, 1.506, 1.462, 1.421, 1.403, 1.378, 1.378, 1.402, 1.419, 1.459, 1.503, 1.541, 1.557, 1.559,
+ 1.564, 1.562, 1.537, 1.494, 1.447, 1.404, 1.378, 1.364, 1.364, 1.377, 1.402, 1.444, 1.491, 1.532, 1.556, 1.559,
+ 1.564, 1.559, 1.532, 1.487, 1.438, 1.395, 1.365, 1.359, 1.359, 1.364, 1.393, 1.436, 1.484, 1.527, 1.555, 1.558,
+ 1.564, 1.559, 1.532, 1.487, 1.438, 1.395, 1.365, 1.356, 1.356, 1.364, 1.393, 1.436, 1.484, 1.527, 1.554, 1.557,
+ 1.564, 1.561, 1.536, 1.492, 1.444, 1.402, 1.374, 1.364, 1.363, 1.373, 1.401, 1.442, 1.489, 1.531, 1.554, 1.557,
+ 1.564, 1.563, 1.544, 1.504, 1.458, 1.418, 1.397, 1.374, 1.374, 1.395, 1.416, 1.456, 1.501, 1.538, 1.556, 1.557,
+ 1.564, 1.562, 1.551, 1.518, 1.477, 1.441, 1.418, 1.397, 1.397, 1.416, 1.438, 1.474, 1.514, 1.546, 1.556, 1.556,
+ 1.562, 1.562, 1.558, 1.534, 1.499, 1.476, 1.441, 1.426, 1.426, 1.438, 1.473, 1.496, 1.531, 1.552, 1.556, 1.555,
+ 1.561, 1.564, 1.564, 1.552, 1.525, 1.497, 1.466, 1.461, 1.461, 1.464, 1.495, 1.523, 1.548, 1.556, 1.556, 1.552
+ ]
+ },
+ {
+ "ct": 5000,
+ "table":
+ [
+ 2.609, 2.616, 2.617, 2.607, 2.573, 2.527, 2.483, 2.481, 2.481, 2.483, 2.529, 2.573, 2.604, 2.613, 2.613, 2.604,
+ 2.609, 2.615, 2.608, 2.576, 2.533, 2.489, 2.439, 2.418, 2.418, 2.439, 2.491, 2.532, 2.577, 2.605, 2.609, 2.607,
+ 2.611, 2.611, 2.597, 2.551, 2.489, 2.439, 2.391, 2.364, 2.364, 2.391, 2.439, 2.491, 2.551, 2.592, 2.607, 2.609,
+ 2.612, 2.608, 2.583, 2.526, 2.457, 2.391, 2.362, 2.318, 2.318, 2.362, 2.391, 2.458, 2.526, 2.581, 2.607, 2.611,
+ 2.612, 2.604, 2.571, 2.507, 2.435, 2.362, 2.317, 2.293, 2.294, 2.318, 2.363, 2.434, 2.508, 2.568, 2.604, 2.612,
+ 2.611, 2.602, 2.564, 2.496, 2.419, 2.349, 2.293, 2.284, 2.284, 2.294, 2.347, 2.421, 2.497, 2.562, 2.603, 2.611,
+ 2.609, 2.601, 2.564, 2.496, 2.419, 2.349, 2.293, 2.278, 2.278, 2.294, 2.347, 2.421, 2.497, 2.562, 2.602, 2.609,
+ 2.609, 2.602, 2.568, 2.503, 2.429, 2.361, 2.311, 2.292, 2.292, 2.309, 2.357, 2.429, 2.504, 2.567, 2.602, 2.609,
+ 2.606, 2.604, 2.579, 2.519, 2.449, 2.384, 2.348, 2.311, 2.311, 2.346, 2.383, 2.449, 2.521, 2.577, 2.604, 2.608,
+ 2.604, 2.603, 2.586, 2.537, 2.474, 2.418, 2.384, 2.348, 2.348, 2.383, 2.417, 2.476, 2.538, 2.586, 2.601, 2.603,
+ 2.603, 2.605, 2.596, 2.561, 2.508, 2.474, 2.418, 2.396, 2.396, 2.417, 2.474, 2.511, 2.562, 2.596, 2.603, 2.602,
+ 2.601, 2.607, 2.606, 2.589, 2.549, 2.507, 2.456, 2.454, 2.454, 2.458, 2.508, 2.554, 2.594, 2.605, 2.605, 2.602
+ ]
+ }
+ ],
+ "calibrations_Cb": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 3.221, 3.226, 3.231, 3.236, 3.239, 3.243, 3.245, 3.247, 3.249, 3.253, 3.255, 3.254, 3.253, 3.242, 3.235, 3.226,
+ 3.225, 3.231, 3.235, 3.238, 3.241, 3.244, 3.246, 3.247, 3.249, 3.254, 3.256, 3.255, 3.252, 3.248, 3.241, 3.232,
+ 3.226, 3.234, 3.239, 3.243, 3.243, 3.245, 3.247, 3.248, 3.251, 3.255, 3.256, 3.256, 3.254, 3.249, 3.244, 3.236,
+ 3.232, 3.238, 3.245, 3.245, 3.246, 3.247, 3.248, 3.251, 3.251, 3.256, 3.257, 3.257, 3.256, 3.254, 3.249, 3.239,
+ 3.232, 3.243, 3.246, 3.246, 3.246, 3.247, 3.248, 3.251, 3.253, 3.257, 3.258, 3.258, 3.257, 3.256, 3.254, 3.239,
+ 3.232, 3.242, 3.246, 3.247, 3.246, 3.246, 3.248, 3.251, 3.252, 3.253, 3.256, 3.255, 3.255, 3.254, 3.251, 3.239,
+ 3.233, 3.241, 3.244, 3.245, 3.244, 3.245, 3.246, 3.249, 3.251, 3.252, 3.253, 3.252, 3.252, 3.252, 3.249, 3.238,
+ 3.238, 3.241, 3.246, 3.246, 3.245, 3.245, 3.247, 3.249, 3.251, 3.252, 3.253, 3.253, 3.252, 3.252, 3.249, 3.239,
+ 3.235, 3.241, 3.245, 3.245, 3.245, 3.245, 3.246, 3.247, 3.251, 3.254, 3.253, 3.255, 3.256, 3.255, 3.251, 3.241,
+ 3.226, 3.235, 3.241, 3.241, 3.241, 3.241, 3.243, 3.245, 3.246, 3.252, 3.253, 3.254, 3.256, 3.254, 3.241, 3.237,
+ 3.205, 3.213, 3.213, 3.214, 3.214, 3.214, 3.214, 3.213, 3.213, 3.216, 3.218, 3.216, 3.214, 3.213, 3.211, 3.208,
+ 3.205, 3.205, 3.212, 3.212, 3.212, 3.213, 3.211, 3.211, 3.211, 3.213, 3.216, 3.214, 3.213, 3.211, 3.208, 3.196
+ ]
+ },
+ {
+ "ct": 5000,
+ "table":
+ [
+ 1.645, 1.646, 1.649, 1.653, 1.654, 1.657, 1.659, 1.661, 1.663, 1.662, 1.661, 1.659, 1.656, 1.651, 1.645, 1.642,
+ 1.646, 1.649, 1.652, 1.654, 1.656, 1.659, 1.662, 1.663, 1.664, 1.664, 1.662, 1.661, 1.657, 1.653, 1.649, 1.644,
+ 1.648, 1.652, 1.654, 1.656, 1.658, 1.662, 1.665, 1.668, 1.668, 1.668, 1.665, 1.662, 1.658, 1.655, 1.652, 1.646,
+ 1.649, 1.653, 1.656, 1.658, 1.661, 1.665, 1.667, 1.671, 1.673, 1.671, 1.668, 1.663, 1.659, 1.656, 1.654, 1.647,
+ 1.649, 1.655, 1.657, 1.659, 1.661, 1.666, 1.671, 1.674, 1.675, 1.673, 1.671, 1.664, 1.659, 1.656, 1.654, 1.648,
+ 1.649, 1.654, 1.656, 1.659, 1.661, 1.666, 1.673, 1.676, 1.676, 1.675, 1.671, 1.664, 1.659, 1.656, 1.654, 1.648,
+ 1.649, 1.654, 1.656, 1.658, 1.659, 1.665, 1.672, 1.675, 1.675, 1.674, 1.668, 1.662, 1.658, 1.655, 1.654, 1.646,
+ 1.652, 1.655, 1.657, 1.659, 1.661, 1.665, 1.671, 1.673, 1.673, 1.672, 1.668, 1.662, 1.658, 1.655, 1.654, 1.647,
+ 1.652, 1.655, 1.657, 1.659, 1.661, 1.664, 1.667, 1.671, 1.672, 1.668, 1.666, 1.662, 1.659, 1.656, 1.654, 1.647,
+ 1.647, 1.652, 1.655, 1.656, 1.657, 1.661, 1.664, 1.665, 1.665, 1.665, 1.663, 1.661, 1.657, 1.655, 1.647, 1.647,
+ 1.639, 1.642, 1.644, 1.645, 1.646, 1.648, 1.648, 1.648, 1.649, 1.649, 1.649, 1.646, 1.645, 1.642, 1.639, 1.636,
+ 1.639, 1.641, 1.642, 1.644, 1.645, 1.646, 1.647, 1.647, 1.648, 1.648, 1.647, 1.645, 1.642, 1.639, 1.636, 1.633
+ ]
+ }
+ ],
+ "luminance_lut":
+ [
+ 2.644, 2.396, 2.077, 1.863, 1.682, 1.535, 1.392, 1.382, 1.382, 1.382, 1.515, 1.657, 1.826, 2.035, 2.351, 2.604,
+ 2.497, 2.229, 1.947, 1.733, 1.539, 1.424, 1.296, 1.249, 1.249, 1.285, 1.401, 1.519, 1.699, 1.908, 2.183, 2.456,
+ 2.389, 2.109, 1.848, 1.622, 1.424, 1.296, 1.201, 1.146, 1.146, 1.188, 1.285, 1.401, 1.591, 1.811, 2.065, 2.347,
+ 2.317, 2.026, 1.771, 1.535, 1.339, 1.201, 1.145, 1.069, 1.069, 1.134, 1.188, 1.318, 1.505, 1.734, 1.983, 2.273,
+ 2.276, 1.972, 1.715, 1.474, 1.281, 1.148, 1.069, 1.033, 1.024, 1.065, 1.134, 1.262, 1.446, 1.679, 1.929, 2.233,
+ 2.268, 1.941, 1.682, 1.441, 1.251, 1.119, 1.033, 1.013, 1.013, 1.024, 1.105, 1.231, 1.415, 1.649, 1.898, 2.227,
+ 2.268, 1.941, 1.682, 1.441, 1.251, 1.119, 1.033, 1.001, 1.001, 1.024, 1.105, 1.231, 1.415, 1.649, 1.898, 2.227,
+ 2.268, 1.951, 1.694, 1.456, 1.265, 1.131, 1.044, 1.026, 1.019, 1.039, 1.118, 1.246, 1.429, 1.663, 1.912, 2.227,
+ 2.291, 1.992, 1.738, 1.505, 1.311, 1.175, 1.108, 1.044, 1.041, 1.106, 1.161, 1.292, 1.478, 1.707, 1.955, 2.252,
+ 2.347, 2.058, 1.803, 1.581, 1.384, 1.245, 1.175, 1.108, 1.108, 1.161, 1.239, 1.364, 1.551, 1.773, 2.023, 2.311,
+ 2.438, 2.156, 1.884, 1.674, 1.484, 1.373, 1.245, 1.199, 1.199, 1.239, 1.363, 1.463, 1.647, 1.858, 2.123, 2.406,
+ 2.563, 2.305, 1.998, 1.792, 1.615, 1.472, 1.339, 1.322, 1.322, 1.326, 1.456, 1.593, 1.767, 1.973, 2.273, 2.532
+ ],
+ "sigma": 0.00178,
+ "sigma_Cb": 0.00217
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 2498,
+ "ccm":
+ [
+ 1.14912, 0.28638, -0.43551,
+ -0.49691, 1.60391, -0.10701,
+ -0.10513, -1.09534, 2.20047
+ ]
+ },
+ {
+ "ct": 2821,
+ "ccm":
+ [
+ 1.18251, 0.15501, -0.33752,
+ -0.44304, 1.58495, -0.14191,
+ -0.05077, -0.96422, 2.01498
+ ]
+ },
+ {
+ "ct": 2925,
+ "ccm":
+ [
+ 1.18668, 0.00195, -0.18864,
+ -0.41617, 1.50514, -0.08897,
+ -0.02675, -0.91143, 1.93818
+ ]
+ },
+ {
+ "ct": 2926,
+ "ccm":
+ [
+ 1.50948, -0.44421, -0.06527,
+ -0.37241, 1.41726, -0.04486,
+ 0.07098, -0.84694, 1.77596
+ ]
+ },
+ {
+ "ct": 2951,
+ "ccm":
+ [
+ 1.52743, -0.47333, -0.05411,
+ -0.36485, 1.40764, -0.04279,
+ 0.08672, -0.90479, 1.81807
+ ]
+ },
+ {
+ "ct": 2954,
+ "ccm":
+ [
+ 1.51683, -0.46841, -0.04841,
+ -0.36288, 1.39914, -0.03625,
+ 0.06421, -0.82034, 1.75613
+ ]
+ },
+ {
+ "ct": 3578,
+ "ccm":
+ [
+ 1.59888, -0.59105, -0.00784,
+ -0.29366, 1.32037, -0.02671,
+ 0.06627, -0.76465, 1.69838
+ ]
+ },
+ {
+ "ct": 3717,
+ "ccm":
+ [
+ 1.59063, -0.58059, -0.01003,
+ -0.29583, 1.32715, -0.03132,
+ 0.03613, -0.67431, 1.63817
+ ]
+ },
+ {
+ "ct": 3784,
+ "ccm":
+ [
+ 1.59379, -0.58861, -0.00517,
+ -0.29178, 1.33292, -0.04115,
+ 0.03541, -0.66162, 1.62622
+ ]
+ },
+ {
+ "ct": 4485,
+ "ccm":
+ [
+ 1.40761, -0.34561, -0.06201,
+ -0.32388, 1.57221, -0.24832,
+ -0.01014, -0.63427, 1.64441
+ ]
+ },
+ {
+ "ct": 4615,
+ "ccm":
+ [
+ 1.41537, -0.35832, -0.05705,
+ -0.31429, 1.56019, -0.24591,
+ -0.01761, -0.61859, 1.63621
+ ]
+ },
+ {
+ "ct": 4671,
+ "ccm":
+ [
+ 1.42941, -0.38178, -0.04764,
+ -0.31421, 1.55925, -0.24504,
+ -0.01141, -0.62987, 1.64129
+ ]
+ },
+ {
+ "ct": 5753,
+ "ccm":
+ [
+ 1.64549, -0.63329, -0.01221,
+ -0.22431, 1.36423, -0.13992,
+ -0.00831, -0.55373, 1.56204
+ ]
+ },
+ {
+ "ct": 5773,
+ "ccm":
+ [
+ 1.63668, -0.63557, -0.00111,
+ -0.21919, 1.36234, -0.14315,
+ -0.00399, -0.57428, 1.57827
+ ]
+ },
+ {
+ "ct": 7433,
+ "ccm":
+ [
+ 1.36007, -0.09277, -0.26729,
+ -0.36886, 2.09249, -0.72363,
+ -0.12573, -0.76761, 1.89334
+ ]
+ },
+ {
+ "ct": 55792,
+ "ccm":
+ [
+ 1.65091, -0.63689, -0.01401,
+ -0.22277, 1.35752, -0.13475,
+ -0.00943, -0.55091, 1.56033
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.sharpen": { }
+ },
+ {
+ "rpi.af":
+ {
+ "ranges":
+ {
+ "normal":
+ {
+ "min": 0.0,
+ "max": 12.0,
+ "default": 1.0
+ },
+ "macro":
+ {
+ "min": 3.0,
+ "max": 15.0,
+ "default": 4.0
+ }
+ },
+ "speeds":
+ {
+ "normal":
+ {
+ "step_coarse": 1.0,
+ "step_fine": 0.25,
+ "contrast_ratio": 0.75,
+ "pdaf_gain": -0.02,
+ "pdaf_squelch": 0.125,
+ "max_slew": 2.0,
+ "pdaf_frames": 20,
+ "dropout_frames": 6,
+ "step_frames": 4
+ }
+ },
+ "conf_epsilon": 8,
+ "conf_thresh": 16,
+ "conf_clip": 512,
+ "skip_frames": 5,
+ "map": [ 0.0, 445, 15.0, 925 ]
+ }
+ },
+ {
+ "rpi.hdr":
+ {
+ "MultiExposureUnmerged":
+ {
+ "cadence": [ 1, 2 ],
+ "channel_map":
+ {
+ "short": 1,
+ "long": 2
+ }
+ }
+ }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/src/ipa/rpi/vc4/data/imx708_wide.json b/src/ipa/rpi/vc4/data/imx708_wide.json
new file mode 100644
index 00000000..6f45aafc
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/imx708_wide.json
@@ -0,0 +1,682 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 4096
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 9989,
+ "reference_gain": 1.23,
+ "reference_aperture": 1.0,
+ "reference_lux": 980,
+ "reference_Y": 8345
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 16.0,
+ "reference_slope": 4.0
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 215,
+ "slope": 0.00287
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "priors": [
+ {
+ "lux": 0,
+ "prior":
+ [
+ 2000, 1.0,
+ 3000, 0.0,
+ 13000, 0.0
+ ]
+ },
+ {
+ "lux": 800,
+ "prior":
+ [
+ 2000, 0.0,
+ 6000, 2.0,
+ 13000, 2.0
+ ]
+ },
+ {
+ "lux": 1500,
+ "prior":
+ [
+ 2000, 0.0,
+ 4000, 1.0,
+ 6000, 6.0,
+ 6500, 7.0,
+ 7000, 1.0,
+ 13000, 1.0
+ ]
+ }
+ ],
+ "modes":
+ {
+ "auto":
+ {
+ "lo": 2500,
+ "hi": 8000
+ },
+ "incandescent":
+ {
+ "lo": 2500,
+ "hi": 3000
+ },
+ "tungsten":
+ {
+ "lo": 3000,
+ "hi": 3500
+ },
+ "fluorescent":
+ {
+ "lo": 4000,
+ "hi": 4700
+ },
+ "indoor":
+ {
+ "lo": 3000,
+ "hi": 5000
+ },
+ "daylight":
+ {
+ "lo": 5500,
+ "hi": 6500
+ },
+ "cloudy":
+ {
+ "lo": 7000,
+ "hi": 8600
+ }
+ },
+ "bayes": 1,
+ "ct_curve":
+ [
+ 2750.0, 0.7881, 0.2849,
+ 2940.0, 0.7559, 0.3103,
+ 3650.0, 0.6291, 0.4206,
+ 4625.0, 0.5336, 0.5161,
+ 5715.0, 0.4668, 0.5898
+ ],
+ "sensitivity_r": 1.05,
+ "sensitivity_b": 1.05,
+ "transverse_pos": 0.01165,
+ "transverse_neg": 0.01601
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "channels": [
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 15000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ],
+ "startup_frames": 5,
+ "convergence_frames": 6,
+ "speed": 0.15
+ },
+ {
+ "base_ev": 0.125,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 15000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ],
+ "startup_frames": 5,
+ "convergence_frames": 6,
+ "speed": 0.15
+ },
+ {
+ "base_ev": 1.5,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 15000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ],
+ "startup_frames": 5,
+ "convergence_frames": 6,
+ "speed": 0.15
+ }
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.5,
+ "calibrations_Cr": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 1.529, 1.526, 1.522, 1.506, 1.489, 1.473, 1.458, 1.456, 1.456, 1.458, 1.474, 1.493, 1.513, 1.531, 1.541, 1.544,
+ 1.527, 1.523, 1.511, 1.491, 1.474, 1.459, 1.445, 1.441, 1.441, 1.446, 1.461, 1.479, 1.499, 1.521, 1.536, 1.541,
+ 1.524, 1.515, 1.498, 1.477, 1.459, 1.444, 1.431, 1.426, 1.426, 1.435, 1.446, 1.466, 1.487, 1.507, 1.528, 1.538,
+ 1.522, 1.512, 1.491, 1.468, 1.447, 1.431, 1.423, 1.417, 1.418, 1.425, 1.435, 1.455, 1.479, 1.499, 1.523, 1.537,
+ 1.522, 1.509, 1.485, 1.463, 1.441, 1.423, 1.416, 1.413, 1.415, 1.418, 1.429, 1.449, 1.473, 1.495, 1.521, 1.538,
+ 1.522, 1.508, 1.483, 1.461, 1.438, 1.421, 1.413, 1.412, 1.412, 1.415, 1.428, 1.447, 1.471, 1.493, 1.519, 1.538,
+ 1.522, 1.509, 1.484, 1.462, 1.439, 1.421, 1.414, 1.411, 1.412, 1.416, 1.428, 1.447, 1.471, 1.493, 1.519, 1.537,
+ 1.523, 1.511, 1.487, 1.465, 1.443, 1.424, 1.417, 1.413, 1.415, 1.419, 1.429, 1.451, 1.473, 1.494, 1.519, 1.536,
+ 1.524, 1.514, 1.493, 1.471, 1.451, 1.434, 1.424, 1.419, 1.419, 1.428, 1.437, 1.457, 1.477, 1.498, 1.521, 1.538,
+ 1.527, 1.521, 1.503, 1.481, 1.462, 1.449, 1.434, 1.429, 1.429, 1.437, 1.451, 1.469, 1.488, 1.508, 1.527, 1.539,
+ 1.529, 1.527, 1.515, 1.495, 1.477, 1.462, 1.449, 1.444, 1.444, 1.451, 1.467, 1.481, 1.499, 1.519, 1.535, 1.543,
+ 1.534, 1.531, 1.527, 1.512, 1.492, 1.476, 1.463, 1.461, 1.461, 1.464, 1.479, 1.495, 1.515, 1.533, 1.543, 1.546
+ ]
+ },
+ {
+ "ct": 5000,
+ "table":
+ [
+ 2.603, 2.599, 2.591, 2.567, 2.539, 2.515, 2.489, 2.489, 2.489, 2.491, 2.516, 2.543, 2.574, 2.597, 2.614, 2.617,
+ 2.596, 2.591, 2.571, 2.542, 2.516, 2.489, 2.464, 2.458, 2.458, 2.469, 2.492, 2.518, 2.547, 2.576, 2.602, 2.614,
+ 2.591, 2.576, 2.546, 2.519, 2.489, 2.464, 2.437, 2.427, 2.427, 2.441, 2.467, 2.492, 2.525, 2.553, 2.586, 2.605,
+ 2.588, 2.568, 2.534, 2.503, 2.472, 2.437, 2.423, 2.409, 2.411, 2.425, 2.441, 2.475, 2.513, 2.541, 2.577, 2.602,
+ 2.588, 2.565, 2.527, 2.494, 2.461, 2.425, 2.409, 2.399, 2.403, 2.409, 2.431, 2.466, 2.503, 2.534, 2.571, 2.601,
+ 2.586, 2.561, 2.525, 2.491, 2.454, 2.418, 2.399, 2.396, 2.395, 2.402, 2.424, 2.461, 2.501, 2.531, 2.567, 2.599,
+ 2.583, 2.559, 2.525, 2.491, 2.454, 2.418, 2.398, 2.393, 2.393, 2.401, 2.423, 2.459, 2.498, 2.531, 2.566, 2.597,
+ 2.583, 2.559, 2.526, 2.494, 2.458, 2.421, 2.404, 2.397, 2.399, 2.404, 2.426, 2.461, 2.501, 2.531, 2.566, 2.596,
+ 2.583, 2.563, 2.531, 2.501, 2.469, 2.435, 2.419, 2.405, 2.404, 2.422, 2.435, 2.471, 2.505, 2.537, 2.572, 2.596,
+ 2.585, 2.571, 2.539, 2.516, 2.486, 2.458, 2.435, 2.424, 2.424, 2.435, 2.459, 2.489, 2.521, 2.546, 2.579, 2.601,
+ 2.589, 2.578, 2.557, 2.532, 2.506, 2.483, 2.458, 2.449, 2.449, 2.459, 2.485, 2.507, 2.535, 2.563, 2.591, 2.605,
+ 2.589, 2.586, 2.575, 2.551, 2.525, 2.503, 2.481, 2.476, 2.476, 2.481, 2.504, 2.526, 2.555, 2.583, 2.604, 2.611
+ ]
+ }
+ ],
+ "calibrations_Cb": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 3.311, 3.339, 3.369, 3.374, 3.371, 3.363, 3.356, 3.353, 3.353, 3.353, 3.357, 3.362, 3.362, 3.356, 3.328, 3.311,
+ 3.321, 3.354, 3.374, 3.374, 3.368, 3.359, 3.352, 3.349, 3.347, 3.347, 3.349, 3.357, 3.361, 3.359, 3.343, 3.324,
+ 3.334, 3.368, 3.375, 3.374, 3.365, 3.356, 3.349, 3.347, 3.346, 3.346, 3.347, 3.349, 3.358, 3.361, 3.357, 3.336,
+ 3.346, 3.378, 3.378, 3.369, 3.363, 3.358, 3.351, 3.348, 3.347, 3.346, 3.347, 3.348, 3.354, 3.364, 3.363, 3.345,
+ 3.351, 3.381, 3.381, 3.368, 3.361, 3.357, 3.349, 3.347, 3.347, 3.345, 3.345, 3.347, 3.353, 3.364, 3.364, 3.347,
+ 3.353, 3.379, 3.379, 3.366, 3.359, 3.351, 3.348, 3.343, 3.342, 3.342, 3.343, 3.345, 3.351, 3.363, 3.363, 3.347,
+ 3.353, 3.376, 3.376, 3.363, 3.351, 3.347, 3.343, 3.338, 3.336, 3.338, 3.339, 3.343, 3.351, 3.361, 3.361, 3.347,
+ 3.351, 3.374, 3.374, 3.359, 3.351, 3.345, 3.338, 3.334, 3.333, 3.334, 3.336, 3.339, 3.347, 3.358, 3.358, 3.345,
+ 3.346, 3.368, 3.368, 3.359, 3.349, 3.343, 3.336, 3.332, 3.327, 3.331, 3.333, 3.337, 3.346, 3.356, 3.356, 3.341,
+ 3.336, 3.362, 3.364, 3.359, 3.351, 3.342, 3.334, 3.324, 3.324, 3.325, 3.329, 3.336, 3.346, 3.351, 3.351, 3.333,
+ 3.324, 3.349, 3.359, 3.358, 3.352, 3.341, 3.329, 3.323, 3.321, 3.322, 3.326, 3.336, 3.346, 3.347, 3.339, 3.319,
+ 3.311, 3.328, 3.352, 3.354, 3.352, 3.341, 3.329, 3.321, 3.319, 3.321, 3.324, 3.338, 3.343, 3.343, 3.319, 3.312
+ ]
+ },
+ {
+ "ct": 5000,
+ "table":
+ [
+ 1.634, 1.647, 1.665, 1.668, 1.668, 1.664, 1.662, 1.662, 1.661, 1.661, 1.661, 1.663, 1.663, 1.659, 1.643, 1.636,
+ 1.639, 1.656, 1.668, 1.669, 1.668, 1.666, 1.664, 1.663, 1.663, 1.661, 1.661, 1.662, 1.663, 1.662, 1.654, 1.642,
+ 1.645, 1.663, 1.669, 1.668, 1.667, 1.667, 1.667, 1.668, 1.668, 1.665, 1.662, 1.661, 1.662, 1.664, 1.661, 1.649,
+ 1.651, 1.669, 1.669, 1.667, 1.666, 1.668, 1.669, 1.672, 1.672, 1.668, 1.665, 1.661, 1.661, 1.665, 1.665, 1.655,
+ 1.654, 1.669, 1.669, 1.666, 1.666, 1.669, 1.672, 1.673, 1.673, 1.671, 1.666, 1.661, 1.661, 1.665, 1.665, 1.659,
+ 1.654, 1.669, 1.669, 1.666, 1.666, 1.669, 1.671, 1.673, 1.672, 1.669, 1.667, 1.661, 1.661, 1.665, 1.665, 1.659,
+ 1.654, 1.668, 1.668, 1.664, 1.663, 1.667, 1.669, 1.671, 1.669, 1.668, 1.665, 1.661, 1.661, 1.663, 1.663, 1.659,
+ 1.653, 1.665, 1.665, 1.661, 1.661, 1.664, 1.667, 1.668, 1.668, 1.665, 1.661, 1.658, 1.659, 1.662, 1.662, 1.657,
+ 1.651, 1.664, 1.664, 1.659, 1.659, 1.661, 1.663, 1.663, 1.662, 1.661, 1.658, 1.656, 1.657, 1.662, 1.662, 1.655,
+ 1.645, 1.661, 1.663, 1.661, 1.659, 1.659, 1.659, 1.657, 1.657, 1.656, 1.654, 1.655, 1.656, 1.661, 1.661, 1.649,
+ 1.641, 1.654, 1.661, 1.661, 1.659, 1.657, 1.655, 1.653, 1.652, 1.651, 1.652, 1.653, 1.657, 1.658, 1.655, 1.644,
+ 1.635, 1.645, 1.661, 1.661, 1.661, 1.655, 1.653, 1.649, 1.648, 1.647, 1.651, 1.653, 1.657, 1.657, 1.646, 1.638
+ ]
+ }
+ ],
+ "luminance_lut":
+ [
+ 3.535, 3.279, 3.049, 2.722, 2.305, 1.958, 1.657, 1.647, 1.647, 1.656, 1.953, 2.289, 2.707, 3.058, 3.325, 3.589,
+ 3.379, 3.157, 2.874, 2.421, 1.973, 1.735, 1.472, 1.388, 1.388, 1.471, 1.724, 1.963, 2.409, 2.877, 3.185, 3.416,
+ 3.288, 3.075, 2.696, 2.169, 1.735, 1.472, 1.311, 1.208, 1.208, 1.306, 1.471, 1.724, 2.159, 2.695, 3.092, 3.321,
+ 3.238, 3.001, 2.534, 1.981, 1.572, 1.311, 1.207, 1.082, 1.082, 1.204, 1.306, 1.563, 1.973, 2.529, 3.008, 3.259,
+ 3.211, 2.938, 2.414, 1.859, 1.468, 1.221, 1.082, 1.036, 1.031, 1.079, 1.217, 1.463, 1.851, 2.403, 2.931, 3.229,
+ 3.206, 2.904, 2.356, 1.802, 1.421, 1.181, 1.037, 1.002, 1.002, 1.032, 1.175, 1.414, 1.793, 2.343, 2.899, 3.223,
+ 3.206, 2.904, 2.356, 1.802, 1.421, 1.181, 1.037, 1.005, 1.005, 1.032, 1.175, 1.414, 1.793, 2.343, 2.899, 3.223,
+ 3.211, 2.936, 2.417, 1.858, 1.468, 1.222, 1.083, 1.037, 1.032, 1.083, 1.218, 1.463, 1.848, 2.403, 2.932, 3.226,
+ 3.234, 2.997, 2.536, 1.979, 1.569, 1.311, 1.206, 1.084, 1.084, 1.204, 1.305, 1.565, 1.966, 2.524, 2.996, 3.251,
+ 3.282, 3.069, 2.697, 2.166, 1.731, 1.471, 1.311, 1.207, 1.207, 1.305, 1.466, 1.729, 2.158, 2.689, 3.077, 3.304,
+ 3.369, 3.146, 2.873, 2.415, 1.964, 1.722, 1.471, 1.382, 1.382, 1.466, 1.722, 1.964, 2.408, 2.871, 3.167, 3.401,
+ 3.524, 3.253, 3.025, 2.691, 2.275, 1.939, 1.657, 1.628, 1.628, 1.654, 1.936, 2.275, 2.687, 3.029, 3.284, 3.574
+ ],
+ "sigma": 0.00195,
+ "sigma_Cb": 0.00241
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 2868,
+ "ccm":
+ [
+ 1.58923, -0.36649, -0.22273,
+ -0.43591, 1.84858, -0.41268,
+ 0.02948, -0.77666, 1.74718
+ ]
+ },
+ {
+ "ct": 2965,
+ "ccm":
+ [
+ 1.73397, -0.42794, -0.30603,
+ -0.36504, 1.72431, -0.35926,
+ 0.12765, -1.10933, 1.98168
+ ]
+ },
+ {
+ "ct": 3603,
+ "ccm":
+ [
+ 1.61787, -0.42704, -0.19084,
+ -0.37819, 1.74588, -0.36769,
+ 0.00961, -0.59807, 1.58847
+ ]
+ },
+ {
+ "ct": 4620,
+ "ccm":
+ [
+ 1.55581, -0.35422, -0.20158,
+ -0.31805, 1.79309, -0.47505,
+ -0.01256, -0.54489, 1.55746
+ ]
+ },
+ {
+ "ct": 5901,
+ "ccm":
+ [
+ 1.64439, -0.48855, -0.15585,
+ -0.29149, 1.67122, -0.37972,
+ -0.03111, -0.44052, 1.47163
+ ]
+ },
+ {
+ "ct": 7610,
+ "ccm":
+ [
+ 1.48667, -0.26072, -0.22595,
+ -0.21815, 1.86724, -0.64909,
+ -0.00985, -0.64485, 1.65471
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.sharpen": { }
+ },
+ {
+ "rpi.af":
+ {
+ "ranges":
+ {
+ "normal":
+ {
+ "min": 0.0,
+ "max": 12.0,
+ "default": 1.0
+ },
+ "macro":
+ {
+ "min": 4.0,
+ "max": 32.0,
+ "default": 6.0
+ }
+ },
+ "speeds":
+ {
+ "normal":
+ {
+ "step_coarse": 2.0,
+ "step_fine": 0.5,
+ "contrast_ratio": 0.75,
+ "pdaf_gain": -0.03,
+ "pdaf_squelch": 0.2,
+ "max_slew": 4.0,
+ "pdaf_frames": 20,
+ "dropout_frames": 6,
+ "step_frames": 4
+ },
+ "fast":
+ {
+ "step_coarse": 2.0,
+ "step_fine": 0.5,
+ "contrast_ratio": 0.75,
+ "pdaf_gain": -0.05,
+ "pdaf_squelch": 0.2,
+ "max_slew": 5.0,
+ "pdaf_frames": 16,
+ "dropout_frames": 6,
+ "step_frames": 4
+ }
+ },
+ "conf_epsilon": 8,
+ "conf_thresh": 12,
+ "conf_clip": 512,
+ "skip_frames": 5,
+ "map": [ 0.0, 420, 35.0, 920 ]
+ }
+ },
+ {
+ "rpi.hdr":
+ {
+ "MultiExposureUnmerged":
+ {
+ "cadence": [ 1, 2 ],
+ "channel_map":
+ {
+ "short": 1,
+ "long": 2
+ }
+ }
+ }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/src/ipa/rpi/vc4/data/imx708_wide_noir.json b/src/ipa/rpi/vc4/data/imx708_wide_noir.json
new file mode 100644
index 00000000..b9a5227e
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/imx708_wide_noir.json
@@ -0,0 +1,673 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 4096
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 9989,
+ "reference_gain": 1.23,
+ "reference_aperture": 1.0,
+ "reference_lux": 980,
+ "reference_Y": 8345
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 16.0,
+ "reference_slope": 4.0
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 215,
+ "slope": 0.00287
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "priors": [
+ {
+ "lux": 0,
+ "prior":
+ [
+ 2000, 1.0,
+ 3000, 0.0,
+ 13000, 0.0
+ ]
+ },
+ {
+ "lux": 800,
+ "prior":
+ [
+ 2000, 0.0,
+ 6000, 2.0,
+ 13000, 2.0
+ ]
+ },
+ {
+ "lux": 1500,
+ "prior":
+ [
+ 2000, 0.0,
+ 4000, 1.0,
+ 6000, 6.0,
+ 6500, 7.0,
+ 7000, 1.0,
+ 13000, 1.0
+ ]
+ }
+ ],
+ "modes":
+ {
+ "auto":
+ {
+ "lo": 2500,
+ "hi": 8000
+ },
+ "incandescent":
+ {
+ "lo": 2500,
+ "hi": 3000
+ },
+ "tungsten":
+ {
+ "lo": 3000,
+ "hi": 3500
+ },
+ "fluorescent":
+ {
+ "lo": 4000,
+ "hi": 4700
+ },
+ "indoor":
+ {
+ "lo": 3000,
+ "hi": 5000
+ },
+ "daylight":
+ {
+ "lo": 5500,
+ "hi": 6500
+ },
+ "cloudy":
+ {
+ "lo": 7000,
+ "hi": 8600
+ }
+ },
+ "bayes": 0,
+ "ct_curve":
+ [
+ 2750.0, 0.7881, 0.2849,
+ 2940.0, 0.7559, 0.3103,
+ 3650.0, 0.6291, 0.4206,
+ 4625.0, 0.5336, 0.5161,
+ 5715.0, 0.4668, 0.5898
+ ],
+ "sensitivity_r": 1.05,
+ "sensitivity_b": 1.05,
+ "transverse_pos": 0.01165,
+ "transverse_neg": 0.01601
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "channels": [
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 15000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ],
+ "startup_frames": 5,
+ "convergence_frames": 6,
+ "speed": 0.15
+ },
+ {
+ "base_ev": 0.125,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 15000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ],
+ "startup_frames": 5,
+ "convergence_frames": 6,
+ "speed": 0.15
+ },
+ {
+ "base_ev": 1.5,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 15000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ],
+ "startup_frames": 5,
+ "convergence_frames": 6,
+ "speed": 0.15
+ }
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.5,
+ "calibrations_Cr": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 1.529, 1.526, 1.522, 1.506, 1.489, 1.473, 1.458, 1.456, 1.456, 1.458, 1.474, 1.493, 1.513, 1.531, 1.541, 1.544,
+ 1.527, 1.523, 1.511, 1.491, 1.474, 1.459, 1.445, 1.441, 1.441, 1.446, 1.461, 1.479, 1.499, 1.521, 1.536, 1.541,
+ 1.524, 1.515, 1.498, 1.477, 1.459, 1.444, 1.431, 1.426, 1.426, 1.435, 1.446, 1.466, 1.487, 1.507, 1.528, 1.538,
+ 1.522, 1.512, 1.491, 1.468, 1.447, 1.431, 1.423, 1.417, 1.418, 1.425, 1.435, 1.455, 1.479, 1.499, 1.523, 1.537,
+ 1.522, 1.509, 1.485, 1.463, 1.441, 1.423, 1.416, 1.413, 1.415, 1.418, 1.429, 1.449, 1.473, 1.495, 1.521, 1.538,
+ 1.522, 1.508, 1.483, 1.461, 1.438, 1.421, 1.413, 1.412, 1.412, 1.415, 1.428, 1.447, 1.471, 1.493, 1.519, 1.538,
+ 1.522, 1.509, 1.484, 1.462, 1.439, 1.421, 1.414, 1.411, 1.412, 1.416, 1.428, 1.447, 1.471, 1.493, 1.519, 1.537,
+ 1.523, 1.511, 1.487, 1.465, 1.443, 1.424, 1.417, 1.413, 1.415, 1.419, 1.429, 1.451, 1.473, 1.494, 1.519, 1.536,
+ 1.524, 1.514, 1.493, 1.471, 1.451, 1.434, 1.424, 1.419, 1.419, 1.428, 1.437, 1.457, 1.477, 1.498, 1.521, 1.538,
+ 1.527, 1.521, 1.503, 1.481, 1.462, 1.449, 1.434, 1.429, 1.429, 1.437, 1.451, 1.469, 1.488, 1.508, 1.527, 1.539,
+ 1.529, 1.527, 1.515, 1.495, 1.477, 1.462, 1.449, 1.444, 1.444, 1.451, 1.467, 1.481, 1.499, 1.519, 1.535, 1.543,
+ 1.534, 1.531, 1.527, 1.512, 1.492, 1.476, 1.463, 1.461, 1.461, 1.464, 1.479, 1.495, 1.515, 1.533, 1.543, 1.546
+ ]
+ },
+ {
+ "ct": 5000,
+ "table":
+ [
+ 2.603, 2.599, 2.591, 2.567, 2.539, 2.515, 2.489, 2.489, 2.489, 2.491, 2.516, 2.543, 2.574, 2.597, 2.614, 2.617,
+ 2.596, 2.591, 2.571, 2.542, 2.516, 2.489, 2.464, 2.458, 2.458, 2.469, 2.492, 2.518, 2.547, 2.576, 2.602, 2.614,
+ 2.591, 2.576, 2.546, 2.519, 2.489, 2.464, 2.437, 2.427, 2.427, 2.441, 2.467, 2.492, 2.525, 2.553, 2.586, 2.605,
+ 2.588, 2.568, 2.534, 2.503, 2.472, 2.437, 2.423, 2.409, 2.411, 2.425, 2.441, 2.475, 2.513, 2.541, 2.577, 2.602,
+ 2.588, 2.565, 2.527, 2.494, 2.461, 2.425, 2.409, 2.399, 2.403, 2.409, 2.431, 2.466, 2.503, 2.534, 2.571, 2.601,
+ 2.586, 2.561, 2.525, 2.491, 2.454, 2.418, 2.399, 2.396, 2.395, 2.402, 2.424, 2.461, 2.501, 2.531, 2.567, 2.599,
+ 2.583, 2.559, 2.525, 2.491, 2.454, 2.418, 2.398, 2.393, 2.393, 2.401, 2.423, 2.459, 2.498, 2.531, 2.566, 2.597,
+ 2.583, 2.559, 2.526, 2.494, 2.458, 2.421, 2.404, 2.397, 2.399, 2.404, 2.426, 2.461, 2.501, 2.531, 2.566, 2.596,
+ 2.583, 2.563, 2.531, 2.501, 2.469, 2.435, 2.419, 2.405, 2.404, 2.422, 2.435, 2.471, 2.505, 2.537, 2.572, 2.596,
+ 2.585, 2.571, 2.539, 2.516, 2.486, 2.458, 2.435, 2.424, 2.424, 2.435, 2.459, 2.489, 2.521, 2.546, 2.579, 2.601,
+ 2.589, 2.578, 2.557, 2.532, 2.506, 2.483, 2.458, 2.449, 2.449, 2.459, 2.485, 2.507, 2.535, 2.563, 2.591, 2.605,
+ 2.589, 2.586, 2.575, 2.551, 2.525, 2.503, 2.481, 2.476, 2.476, 2.481, 2.504, 2.526, 2.555, 2.583, 2.604, 2.611
+ ]
+ }
+ ],
+ "calibrations_Cb": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 3.311, 3.339, 3.369, 3.374, 3.371, 3.363, 3.356, 3.353, 3.353, 3.353, 3.357, 3.362, 3.362, 3.356, 3.328, 3.311,
+ 3.321, 3.354, 3.374, 3.374, 3.368, 3.359, 3.352, 3.349, 3.347, 3.347, 3.349, 3.357, 3.361, 3.359, 3.343, 3.324,
+ 3.334, 3.368, 3.375, 3.374, 3.365, 3.356, 3.349, 3.347, 3.346, 3.346, 3.347, 3.349, 3.358, 3.361, 3.357, 3.336,
+ 3.346, 3.378, 3.378, 3.369, 3.363, 3.358, 3.351, 3.348, 3.347, 3.346, 3.347, 3.348, 3.354, 3.364, 3.363, 3.345,
+ 3.351, 3.381, 3.381, 3.368, 3.361, 3.357, 3.349, 3.347, 3.347, 3.345, 3.345, 3.347, 3.353, 3.364, 3.364, 3.347,
+ 3.353, 3.379, 3.379, 3.366, 3.359, 3.351, 3.348, 3.343, 3.342, 3.342, 3.343, 3.345, 3.351, 3.363, 3.363, 3.347,
+ 3.353, 3.376, 3.376, 3.363, 3.351, 3.347, 3.343, 3.338, 3.336, 3.338, 3.339, 3.343, 3.351, 3.361, 3.361, 3.347,
+ 3.351, 3.374, 3.374, 3.359, 3.351, 3.345, 3.338, 3.334, 3.333, 3.334, 3.336, 3.339, 3.347, 3.358, 3.358, 3.345,
+ 3.346, 3.368, 3.368, 3.359, 3.349, 3.343, 3.336, 3.332, 3.327, 3.331, 3.333, 3.337, 3.346, 3.356, 3.356, 3.341,
+ 3.336, 3.362, 3.364, 3.359, 3.351, 3.342, 3.334, 3.324, 3.324, 3.325, 3.329, 3.336, 3.346, 3.351, 3.351, 3.333,
+ 3.324, 3.349, 3.359, 3.358, 3.352, 3.341, 3.329, 3.323, 3.321, 3.322, 3.326, 3.336, 3.346, 3.347, 3.339, 3.319,
+ 3.311, 3.328, 3.352, 3.354, 3.352, 3.341, 3.329, 3.321, 3.319, 3.321, 3.324, 3.338, 3.343, 3.343, 3.319, 3.312
+ ]
+ },
+ {
+ "ct": 5000,
+ "table":
+ [
+ 1.634, 1.647, 1.665, 1.668, 1.668, 1.664, 1.662, 1.662, 1.661, 1.661, 1.661, 1.663, 1.663, 1.659, 1.643, 1.636,
+ 1.639, 1.656, 1.668, 1.669, 1.668, 1.666, 1.664, 1.663, 1.663, 1.661, 1.661, 1.662, 1.663, 1.662, 1.654, 1.642,
+ 1.645, 1.663, 1.669, 1.668, 1.667, 1.667, 1.667, 1.668, 1.668, 1.665, 1.662, 1.661, 1.662, 1.664, 1.661, 1.649,
+ 1.651, 1.669, 1.669, 1.667, 1.666, 1.668, 1.669, 1.672, 1.672, 1.668, 1.665, 1.661, 1.661, 1.665, 1.665, 1.655,
+ 1.654, 1.669, 1.669, 1.666, 1.666, 1.669, 1.672, 1.673, 1.673, 1.671, 1.666, 1.661, 1.661, 1.665, 1.665, 1.659,
+ 1.654, 1.669, 1.669, 1.666, 1.666, 1.669, 1.671, 1.673, 1.672, 1.669, 1.667, 1.661, 1.661, 1.665, 1.665, 1.659,
+ 1.654, 1.668, 1.668, 1.664, 1.663, 1.667, 1.669, 1.671, 1.669, 1.668, 1.665, 1.661, 1.661, 1.663, 1.663, 1.659,
+ 1.653, 1.665, 1.665, 1.661, 1.661, 1.664, 1.667, 1.668, 1.668, 1.665, 1.661, 1.658, 1.659, 1.662, 1.662, 1.657,
+ 1.651, 1.664, 1.664, 1.659, 1.659, 1.661, 1.663, 1.663, 1.662, 1.661, 1.658, 1.656, 1.657, 1.662, 1.662, 1.655,
+ 1.645, 1.661, 1.663, 1.661, 1.659, 1.659, 1.659, 1.657, 1.657, 1.656, 1.654, 1.655, 1.656, 1.661, 1.661, 1.649,
+ 1.641, 1.654, 1.661, 1.661, 1.659, 1.657, 1.655, 1.653, 1.652, 1.651, 1.652, 1.653, 1.657, 1.658, 1.655, 1.644,
+ 1.635, 1.645, 1.661, 1.661, 1.661, 1.655, 1.653, 1.649, 1.648, 1.647, 1.651, 1.653, 1.657, 1.657, 1.646, 1.638
+ ]
+ }
+ ],
+ "luminance_lut":
+ [
+ 3.535, 3.279, 3.049, 2.722, 2.305, 1.958, 1.657, 1.647, 1.647, 1.656, 1.953, 2.289, 2.707, 3.058, 3.325, 3.589,
+ 3.379, 3.157, 2.874, 2.421, 1.973, 1.735, 1.472, 1.388, 1.388, 1.471, 1.724, 1.963, 2.409, 2.877, 3.185, 3.416,
+ 3.288, 3.075, 2.696, 2.169, 1.735, 1.472, 1.311, 1.208, 1.208, 1.306, 1.471, 1.724, 2.159, 2.695, 3.092, 3.321,
+ 3.238, 3.001, 2.534, 1.981, 1.572, 1.311, 1.207, 1.082, 1.082, 1.204, 1.306, 1.563, 1.973, 2.529, 3.008, 3.259,
+ 3.211, 2.938, 2.414, 1.859, 1.468, 1.221, 1.082, 1.036, 1.031, 1.079, 1.217, 1.463, 1.851, 2.403, 2.931, 3.229,
+ 3.206, 2.904, 2.356, 1.802, 1.421, 1.181, 1.037, 1.002, 1.002, 1.032, 1.175, 1.414, 1.793, 2.343, 2.899, 3.223,
+ 3.206, 2.904, 2.356, 1.802, 1.421, 1.181, 1.037, 1.005, 1.005, 1.032, 1.175, 1.414, 1.793, 2.343, 2.899, 3.223,
+ 3.211, 2.936, 2.417, 1.858, 1.468, 1.222, 1.083, 1.037, 1.032, 1.083, 1.218, 1.463, 1.848, 2.403, 2.932, 3.226,
+ 3.234, 2.997, 2.536, 1.979, 1.569, 1.311, 1.206, 1.084, 1.084, 1.204, 1.305, 1.565, 1.966, 2.524, 2.996, 3.251,
+ 3.282, 3.069, 2.697, 2.166, 1.731, 1.471, 1.311, 1.207, 1.207, 1.305, 1.466, 1.729, 2.158, 2.689, 3.077, 3.304,
+ 3.369, 3.146, 2.873, 2.415, 1.964, 1.722, 1.471, 1.382, 1.382, 1.466, 1.722, 1.964, 2.408, 2.871, 3.167, 3.401,
+ 3.524, 3.253, 3.025, 2.691, 2.275, 1.939, 1.657, 1.628, 1.628, 1.654, 1.936, 2.275, 2.687, 3.029, 3.284, 3.574
+ ],
+ "sigma": 0.00195,
+ "sigma_Cb": 0.00241
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 2750,
+ "ccm":
+ [
+ 1.13004, 0.36392, -0.49396,
+ -0.45885, 1.68171, -0.22286,
+ -0.06473, -0.86962, 1.93435
+ ]
+ },
+ {
+ "ct": 2940,
+ "ccm":
+ [
+ 1.29876, 0.09627, -0.39503,
+ -0.43085, 1.60258, -0.17172,
+ -0.02638, -0.92581, 1.95218
+ ]
+ },
+ {
+ "ct": 3650,
+ "ccm":
+ [
+ 1.57729, -0.29734, -0.27995,
+ -0.42965, 1.66231, -0.23265,
+ -0.02183, -0.62331, 1.64514
+ ]
+ },
+ {
+ "ct": 4625,
+ "ccm":
+ [
+ 1.52145, -0.22382, -0.29763,
+ -0.40445, 1.82186, -0.41742,
+ -0.05732, -0.56222, 1.61954
+ ]
+ },
+ {
+ "ct": 5715,
+ "ccm":
+ [
+ 1.67851, -0.39193, -0.28658,
+ -0.37169, 1.72949, -0.35781,
+ -0.09556, -0.41951, 1.51508
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.sharpen": { }
+ },
+ {
+ "rpi.af":
+ {
+ "ranges":
+ {
+ "normal":
+ {
+ "min": 0.0,
+ "max": 12.0,
+ "default": 1.0
+ },
+ "macro":
+ {
+ "min": 4.0,
+ "max": 32.0,
+ "default": 6.0
+ }
+ },
+ "speeds":
+ {
+ "normal":
+ {
+ "step_coarse": 2.0,
+ "step_fine": 0.5,
+ "contrast_ratio": 0.75,
+ "pdaf_gain": -0.03,
+ "pdaf_squelch": 0.2,
+ "max_slew": 4.0,
+ "pdaf_frames": 20,
+ "dropout_frames": 6,
+ "step_frames": 4
+ },
+ "fast":
+ {
+ "step_coarse": 2.0,
+ "step_fine": 0.5,
+ "contrast_ratio": 0.75,
+ "pdaf_gain": -0.05,
+ "pdaf_squelch": 0.2,
+ "max_slew": 5.0,
+ "pdaf_frames": 16,
+ "dropout_frames": 6,
+ "step_frames": 4
+ }
+ },
+ "conf_epsilon": 8,
+ "conf_thresh": 12,
+ "conf_clip": 512,
+ "skip_frames": 5,
+ "map": [ 0.0, 420, 35.0, 920 ]
+ }
+ },
+ {
+ "rpi.hdr":
+ {
+ "MultiExposureUnmerged":
+ {
+ "cadence": [ 1, 2 ],
+ "channel_map":
+ {
+ "short": 1,
+ "long": 2
+ }
+ }
+ }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/src/ipa/raspberrypi/data/meson.build b/src/ipa/rpi/vc4/data/meson.build
index 211811cf..afbf875a 100644
--- a/src/ipa/raspberrypi/data/meson.build
+++ b/src/ipa/rpi/vc4/data/meson.build
@@ -5,16 +5,24 @@ conf_files = files([
'imx219_noir.json',
'imx290.json',
'imx296.json',
+ 'imx296_mono.json',
'imx378.json',
'imx477.json',
'imx477_noir.json',
+ 'imx477_scientific.json',
'imx519.json',
+ 'imx708.json',
+ 'imx708_noir.json',
+ 'imx708_wide.json',
+ 'imx708_wide_noir.json',
'ov5647.json',
'ov5647_noir.json',
- 'ov9281.json',
+ 'ov64a40.json',
+ 'ov9281_mono.json',
'se327m12.json',
'uncalibrated.json',
])
install_data(conf_files,
- install_dir : ipa_data_dir / 'raspberrypi')
+ install_dir : ipa_data_dir / 'rpi' / 'vc4',
+ install_tag : 'runtime')
diff --git a/src/ipa/rpi/vc4/data/ov5647.json b/src/ipa/rpi/vc4/data/ov5647.json
new file mode 100644
index 00000000..40c6059c
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/ov5647.json
@@ -0,0 +1,696 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 1024
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 21663,
+ "reference_gain": 1.0,
+ "reference_aperture": 1.0,
+ "reference_lux": 987,
+ "reference_Y": 8961
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 0,
+ "reference_slope": 4.25
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 401,
+ "slope": 0.05619
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "priors": [
+ {
+ "lux": 0,
+ "prior":
+ [
+ 2000, 1.0,
+ 3000, 0.0,
+ 13000, 0.0
+ ]
+ },
+ {
+ "lux": 800,
+ "prior":
+ [
+ 2000, 0.0,
+ 6000, 2.0,
+ 13000, 2.0
+ ]
+ },
+ {
+ "lux": 1500,
+ "prior":
+ [
+ 2000, 0.0,
+ 4000, 1.0,
+ 6000, 6.0,
+ 6500, 7.0,
+ 7000, 1.0,
+ 13000, 1.0
+ ]
+ }
+ ],
+ "modes":
+ {
+ "auto":
+ {
+ "lo": 2500,
+ "hi": 8000
+ },
+ "incandescent":
+ {
+ "lo": 2500,
+ "hi": 3000
+ },
+ "tungsten":
+ {
+ "lo": 3000,
+ "hi": 3500
+ },
+ "fluorescent":
+ {
+ "lo": 4000,
+ "hi": 4700
+ },
+ "indoor":
+ {
+ "lo": 3000,
+ "hi": 5000
+ },
+ "daylight":
+ {
+ "lo": 5500,
+ "hi": 6500
+ },
+ "cloudy":
+ {
+ "lo": 7000,
+ "hi": 8600
+ }
+ },
+ "bayes": 1,
+ "ct_curve":
+ [
+ 2500.0, 1.0289, 0.4503,
+ 2803.0, 0.9428, 0.5108,
+ 2914.0, 0.9406, 0.5127,
+ 3605.0, 0.8261, 0.6249,
+ 4540.0, 0.7331, 0.7533,
+ 5699.0, 0.6715, 0.8627,
+ 8625.0, 0.6081, 1.0012
+ ],
+ "sensitivity_r": 1.05,
+ "sensitivity_b": 1.05,
+ "transverse_pos": 0.0321,
+ "transverse_neg": 0.04313
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "channels": [
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ],
+ "base_ev": 1.25
+ },
+ {
+ "base_ev": 1.25,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ },
+ {
+ "base_ev": 1.25,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.5,
+ "calibrations_Cr": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 1.105, 1.103, 1.093, 1.083, 1.071, 1.065, 1.065, 1.065, 1.066, 1.069, 1.072, 1.077, 1.084, 1.089, 1.093, 1.093,
+ 1.103, 1.096, 1.084, 1.072, 1.059, 1.051, 1.047, 1.047, 1.051, 1.053, 1.059, 1.067, 1.075, 1.082, 1.085, 1.086,
+ 1.096, 1.084, 1.072, 1.059, 1.051, 1.045, 1.039, 1.038, 1.039, 1.045, 1.049, 1.057, 1.063, 1.072, 1.081, 1.082,
+ 1.092, 1.075, 1.061, 1.052, 1.045, 1.039, 1.036, 1.035, 1.035, 1.039, 1.044, 1.049, 1.056, 1.063, 1.072, 1.081,
+ 1.092, 1.073, 1.058, 1.048, 1.043, 1.038, 1.035, 1.033, 1.033, 1.035, 1.039, 1.044, 1.051, 1.057, 1.069, 1.078,
+ 1.091, 1.068, 1.054, 1.045, 1.041, 1.038, 1.035, 1.032, 1.032, 1.032, 1.036, 1.041, 1.045, 1.055, 1.069, 1.078,
+ 1.091, 1.068, 1.052, 1.043, 1.041, 1.038, 1.035, 1.032, 1.031, 1.032, 1.034, 1.036, 1.043, 1.055, 1.069, 1.078,
+ 1.092, 1.068, 1.052, 1.047, 1.042, 1.041, 1.038, 1.035, 1.032, 1.032, 1.035, 1.039, 1.043, 1.055, 1.071, 1.079,
+ 1.092, 1.073, 1.057, 1.051, 1.047, 1.047, 1.044, 1.041, 1.038, 1.038, 1.039, 1.043, 1.051, 1.059, 1.076, 1.083,
+ 1.092, 1.081, 1.068, 1.058, 1.056, 1.056, 1.053, 1.052, 1.049, 1.048, 1.048, 1.051, 1.059, 1.066, 1.083, 1.085,
+ 1.091, 1.087, 1.081, 1.068, 1.065, 1.064, 1.062, 1.062, 1.061, 1.056, 1.056, 1.056, 1.064, 1.069, 1.084, 1.089,
+ 1.091, 1.089, 1.085, 1.079, 1.069, 1.068, 1.067, 1.067, 1.067, 1.063, 1.061, 1.063, 1.068, 1.069, 1.081, 1.092
+ ]
+ },
+ {
+ "ct": 5000,
+ "table":
+ [
+ 1.486, 1.484, 1.468, 1.449, 1.427, 1.403, 1.399, 1.399, 1.399, 1.404, 1.413, 1.433, 1.454, 1.473, 1.482, 1.488,
+ 1.484, 1.472, 1.454, 1.431, 1.405, 1.381, 1.365, 1.365, 1.367, 1.373, 1.392, 1.411, 1.438, 1.458, 1.476, 1.481,
+ 1.476, 1.458, 1.433, 1.405, 1.381, 1.361, 1.339, 1.334, 1.334, 1.346, 1.362, 1.391, 1.411, 1.438, 1.462, 1.474,
+ 1.471, 1.443, 1.417, 1.388, 1.361, 1.339, 1.321, 1.313, 1.313, 1.327, 1.346, 1.362, 1.391, 1.422, 1.453, 1.473,
+ 1.469, 1.439, 1.408, 1.377, 1.349, 1.321, 1.312, 1.299, 1.299, 1.311, 1.327, 1.348, 1.378, 1.415, 1.446, 1.468,
+ 1.468, 1.434, 1.402, 1.371, 1.341, 1.316, 1.299, 1.296, 1.295, 1.299, 1.314, 1.338, 1.371, 1.408, 1.441, 1.466,
+ 1.468, 1.434, 1.401, 1.371, 1.341, 1.316, 1.301, 1.296, 1.295, 1.297, 1.314, 1.338, 1.369, 1.408, 1.441, 1.465,
+ 1.469, 1.436, 1.401, 1.374, 1.348, 1.332, 1.315, 1.301, 1.301, 1.313, 1.324, 1.342, 1.372, 1.409, 1.442, 1.465,
+ 1.471, 1.444, 1.413, 1.388, 1.371, 1.348, 1.332, 1.323, 1.323, 1.324, 1.342, 1.362, 1.386, 1.418, 1.449, 1.467,
+ 1.473, 1.454, 1.431, 1.407, 1.388, 1.371, 1.359, 1.352, 1.351, 1.351, 1.362, 1.383, 1.404, 1.433, 1.462, 1.472,
+ 1.474, 1.461, 1.447, 1.424, 1.407, 1.394, 1.385, 1.381, 1.379, 1.381, 1.383, 1.401, 1.419, 1.444, 1.466, 1.481,
+ 1.474, 1.464, 1.455, 1.442, 1.421, 1.408, 1.403, 1.403, 1.403, 1.399, 1.402, 1.415, 1.432, 1.446, 1.467, 1.483
+ ]
+ },
+ {
+ "ct": 6500,
+ "table":
+ [
+ 1.567, 1.565, 1.555, 1.541, 1.525, 1.518, 1.518, 1.518, 1.521, 1.527, 1.532, 1.541, 1.551, 1.559, 1.567, 1.569,
+ 1.565, 1.557, 1.542, 1.527, 1.519, 1.515, 1.511, 1.516, 1.519, 1.524, 1.528, 1.533, 1.542, 1.553, 1.559, 1.562,
+ 1.561, 1.546, 1.532, 1.521, 1.518, 1.515, 1.511, 1.516, 1.519, 1.524, 1.528, 1.529, 1.533, 1.542, 1.554, 1.559,
+ 1.561, 1.539, 1.526, 1.524, 1.521, 1.521, 1.522, 1.524, 1.525, 1.531, 1.529, 1.529, 1.531, 1.538, 1.549, 1.558,
+ 1.559, 1.538, 1.526, 1.525, 1.524, 1.528, 1.534, 1.536, 1.536, 1.536, 1.532, 1.529, 1.531, 1.537, 1.548, 1.556,
+ 1.561, 1.537, 1.525, 1.524, 1.526, 1.532, 1.537, 1.539, 1.538, 1.537, 1.532, 1.529, 1.529, 1.537, 1.546, 1.556,
+ 1.561, 1.536, 1.524, 1.522, 1.525, 1.532, 1.538, 1.538, 1.537, 1.533, 1.528, 1.526, 1.527, 1.536, 1.546, 1.555,
+ 1.561, 1.537, 1.522, 1.521, 1.524, 1.531, 1.536, 1.537, 1.534, 1.529, 1.526, 1.522, 1.523, 1.534, 1.547, 1.555,
+ 1.561, 1.538, 1.524, 1.522, 1.526, 1.531, 1.535, 1.535, 1.534, 1.527, 1.524, 1.522, 1.522, 1.535, 1.549, 1.556,
+ 1.558, 1.543, 1.532, 1.526, 1.526, 1.529, 1.534, 1.535, 1.533, 1.526, 1.523, 1.522, 1.524, 1.537, 1.552, 1.557,
+ 1.555, 1.546, 1.541, 1.528, 1.527, 1.528, 1.531, 1.533, 1.531, 1.527, 1.522, 1.522, 1.526, 1.536, 1.552, 1.561,
+ 1.555, 1.547, 1.542, 1.538, 1.526, 1.526, 1.529, 1.531, 1.529, 1.528, 1.519, 1.519, 1.527, 1.531, 1.543, 1.561
+ ]
+ }
+ ],
+ "calibrations_Cb": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 1.684, 1.688, 1.691, 1.697, 1.709, 1.722, 1.735, 1.745, 1.747, 1.745, 1.731, 1.719, 1.709, 1.705, 1.699, 1.699,
+ 1.684, 1.689, 1.694, 1.708, 1.721, 1.735, 1.747, 1.762, 1.762, 1.758, 1.745, 1.727, 1.716, 1.707, 1.701, 1.699,
+ 1.684, 1.691, 1.704, 1.719, 1.734, 1.755, 1.772, 1.786, 1.789, 1.788, 1.762, 1.745, 1.724, 1.709, 1.702, 1.698,
+ 1.682, 1.694, 1.709, 1.729, 1.755, 1.773, 1.798, 1.815, 1.817, 1.808, 1.788, 1.762, 1.733, 1.714, 1.704, 1.699,
+ 1.682, 1.693, 1.713, 1.742, 1.772, 1.798, 1.815, 1.829, 1.831, 1.821, 1.807, 1.773, 1.742, 1.716, 1.703, 1.699,
+ 1.681, 1.693, 1.713, 1.742, 1.772, 1.799, 1.828, 1.839, 1.839, 1.828, 1.807, 1.774, 1.742, 1.715, 1.699, 1.695,
+ 1.679, 1.691, 1.712, 1.739, 1.771, 1.798, 1.825, 1.829, 1.831, 1.818, 1.801, 1.774, 1.738, 1.712, 1.695, 1.691,
+ 1.676, 1.685, 1.703, 1.727, 1.761, 1.784, 1.801, 1.817, 1.817, 1.801, 1.779, 1.761, 1.729, 1.706, 1.691, 1.684,
+ 1.669, 1.678, 1.692, 1.714, 1.741, 1.764, 1.784, 1.795, 1.795, 1.779, 1.761, 1.738, 1.713, 1.696, 1.683, 1.679,
+ 1.664, 1.671, 1.679, 1.693, 1.716, 1.741, 1.762, 1.769, 1.769, 1.753, 1.738, 1.713, 1.701, 1.687, 1.681, 1.676,
+ 1.661, 1.664, 1.671, 1.679, 1.693, 1.714, 1.732, 1.739, 1.739, 1.729, 1.708, 1.701, 1.685, 1.679, 1.676, 1.677,
+ 1.659, 1.661, 1.664, 1.671, 1.679, 1.693, 1.712, 1.714, 1.714, 1.708, 1.701, 1.687, 1.679, 1.672, 1.673, 1.677
+ ]
+ },
+ {
+ "ct": 5000,
+ "table":
+ [
+ 1.177, 1.183, 1.187, 1.191, 1.197, 1.206, 1.213, 1.215, 1.215, 1.215, 1.211, 1.204, 1.196, 1.191, 1.183, 1.182,
+ 1.179, 1.185, 1.191, 1.196, 1.206, 1.217, 1.224, 1.229, 1.229, 1.226, 1.221, 1.212, 1.202, 1.195, 1.188, 1.182,
+ 1.183, 1.191, 1.196, 1.206, 1.217, 1.229, 1.239, 1.245, 1.245, 1.245, 1.233, 1.221, 1.212, 1.199, 1.193, 1.187,
+ 1.183, 1.192, 1.201, 1.212, 1.229, 1.241, 1.252, 1.259, 1.259, 1.257, 1.245, 1.233, 1.217, 1.201, 1.194, 1.192,
+ 1.183, 1.192, 1.202, 1.219, 1.238, 1.252, 1.261, 1.269, 1.268, 1.261, 1.257, 1.241, 1.223, 1.204, 1.194, 1.191,
+ 1.182, 1.192, 1.202, 1.219, 1.239, 1.255, 1.266, 1.271, 1.271, 1.265, 1.258, 1.242, 1.223, 1.205, 1.192, 1.191,
+ 1.181, 1.189, 1.199, 1.218, 1.239, 1.254, 1.262, 1.268, 1.268, 1.258, 1.253, 1.241, 1.221, 1.204, 1.191, 1.187,
+ 1.179, 1.184, 1.193, 1.211, 1.232, 1.243, 1.254, 1.257, 1.256, 1.253, 1.242, 1.232, 1.216, 1.199, 1.187, 1.183,
+ 1.174, 1.179, 1.187, 1.202, 1.218, 1.232, 1.243, 1.246, 1.246, 1.239, 1.232, 1.218, 1.207, 1.191, 1.183, 1.179,
+ 1.169, 1.175, 1.181, 1.189, 1.202, 1.218, 1.229, 1.232, 1.232, 1.224, 1.218, 1.207, 1.199, 1.185, 1.181, 1.174,
+ 1.164, 1.168, 1.175, 1.179, 1.189, 1.201, 1.209, 1.213, 1.213, 1.209, 1.201, 1.198, 1.186, 1.181, 1.174, 1.173,
+ 1.161, 1.166, 1.171, 1.175, 1.179, 1.189, 1.197, 1.198, 1.198, 1.197, 1.196, 1.186, 1.182, 1.175, 1.173, 1.173
+ ]
+ },
+ {
+ "ct": 6500,
+ "table":
+ [
+ 1.166, 1.171, 1.173, 1.178, 1.187, 1.193, 1.201, 1.205, 1.205, 1.205, 1.199, 1.191, 1.184, 1.179, 1.174, 1.171,
+ 1.166, 1.172, 1.176, 1.184, 1.195, 1.202, 1.209, 1.216, 1.216, 1.213, 1.208, 1.201, 1.189, 1.182, 1.176, 1.171,
+ 1.166, 1.173, 1.183, 1.195, 1.202, 1.214, 1.221, 1.228, 1.229, 1.228, 1.221, 1.209, 1.201, 1.186, 1.179, 1.174,
+ 1.165, 1.174, 1.187, 1.201, 1.214, 1.223, 1.235, 1.241, 1.242, 1.241, 1.229, 1.221, 1.205, 1.188, 1.181, 1.177,
+ 1.165, 1.174, 1.189, 1.207, 1.223, 1.235, 1.242, 1.253, 1.252, 1.245, 1.241, 1.228, 1.211, 1.189, 1.181, 1.178,
+ 1.164, 1.173, 1.189, 1.207, 1.224, 1.238, 1.249, 1.255, 1.255, 1.249, 1.242, 1.228, 1.211, 1.191, 1.179, 1.176,
+ 1.163, 1.172, 1.187, 1.207, 1.223, 1.237, 1.245, 1.253, 1.252, 1.243, 1.237, 1.228, 1.207, 1.188, 1.176, 1.173,
+ 1.159, 1.167, 1.179, 1.199, 1.217, 1.227, 1.237, 1.241, 1.241, 1.237, 1.228, 1.217, 1.201, 1.184, 1.174, 1.169,
+ 1.156, 1.164, 1.172, 1.189, 1.205, 1.217, 1.226, 1.229, 1.229, 1.222, 1.217, 1.204, 1.192, 1.177, 1.171, 1.166,
+ 1.154, 1.159, 1.166, 1.177, 1.189, 1.205, 1.213, 1.216, 1.216, 1.209, 1.204, 1.192, 1.183, 1.172, 1.168, 1.162,
+ 1.152, 1.155, 1.161, 1.166, 1.177, 1.188, 1.195, 1.198, 1.199, 1.196, 1.187, 1.183, 1.173, 1.168, 1.163, 1.162,
+ 1.151, 1.154, 1.158, 1.162, 1.168, 1.177, 1.183, 1.184, 1.184, 1.184, 1.182, 1.172, 1.168, 1.165, 1.162, 1.161
+ ]
+ }
+ ],
+ "luminance_lut":
+ [
+ 2.236, 2.111, 1.912, 1.741, 1.579, 1.451, 1.379, 1.349, 1.349, 1.361, 1.411, 1.505, 1.644, 1.816, 2.034, 2.159,
+ 2.139, 1.994, 1.796, 1.625, 1.467, 1.361, 1.285, 1.248, 1.239, 1.265, 1.321, 1.408, 1.536, 1.703, 1.903, 2.087,
+ 2.047, 1.898, 1.694, 1.511, 1.373, 1.254, 1.186, 1.152, 1.142, 1.166, 1.226, 1.309, 1.441, 1.598, 1.799, 1.978,
+ 1.999, 1.824, 1.615, 1.429, 1.281, 1.179, 1.113, 1.077, 1.071, 1.096, 1.153, 1.239, 1.357, 1.525, 1.726, 1.915,
+ 1.976, 1.773, 1.563, 1.374, 1.222, 1.119, 1.064, 1.032, 1.031, 1.049, 1.099, 1.188, 1.309, 1.478, 1.681, 1.893,
+ 1.973, 1.756, 1.542, 1.351, 1.196, 1.088, 1.028, 1.011, 1.004, 1.029, 1.077, 1.169, 1.295, 1.459, 1.663, 1.891,
+ 1.973, 1.761, 1.541, 1.349, 1.193, 1.087, 1.031, 1.006, 1.006, 1.023, 1.075, 1.169, 1.298, 1.463, 1.667, 1.891,
+ 1.982, 1.789, 1.568, 1.373, 1.213, 1.111, 1.051, 1.029, 1.024, 1.053, 1.106, 1.199, 1.329, 1.495, 1.692, 1.903,
+ 2.015, 1.838, 1.621, 1.426, 1.268, 1.159, 1.101, 1.066, 1.068, 1.099, 1.166, 1.259, 1.387, 1.553, 1.751, 1.937,
+ 2.076, 1.911, 1.692, 1.507, 1.346, 1.236, 1.169, 1.136, 1.139, 1.174, 1.242, 1.349, 1.475, 1.641, 1.833, 2.004,
+ 2.193, 2.011, 1.798, 1.604, 1.444, 1.339, 1.265, 1.235, 1.237, 1.273, 1.351, 1.461, 1.598, 1.758, 1.956, 2.125,
+ 2.263, 2.154, 1.916, 1.711, 1.549, 1.432, 1.372, 1.356, 1.356, 1.383, 1.455, 1.578, 1.726, 1.914, 2.119, 2.211
+ ],
+ "sigma": 0.006,
+ "sigma_Cb": 0.00208
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 2873,
+ "ccm":
+ [
+ 1.88195, -0.26249, -0.61946,
+ -0.63842, 2.11535, -0.47693,
+ -0.13531, -0.99739, 2.13271
+ ]
+ },
+ {
+ "ct": 2965,
+ "ccm":
+ [
+ 2.15048, -0.51859, -0.63189,
+ -0.53572, 1.92585, -0.39013,
+ 0.01831, -1.48576, 2.46744
+ ]
+ },
+ {
+ "ct": 3606,
+ "ccm":
+ [
+ 1.97522, -0.43847, -0.53675,
+ -0.56151, 1.99765, -0.43614,
+ -0.12438, -0.77056, 1.89493
+ ]
+ },
+ {
+ "ct": 4700,
+ "ccm":
+ [
+ 2.00971, -0.51461, -0.49511,
+ -0.52109, 2.01003, -0.48894,
+ -0.09527, -0.67318, 1.76845
+ ]
+ },
+ {
+ "ct": 5890,
+ "ccm":
+ [
+ 2.13616, -0.65283, -0.48333,
+ -0.48364, 1.93115, -0.44751,
+ -0.13465, -0.54831, 1.68295
+ ]
+ },
+ {
+ "ct": 7600,
+ "ccm":
+ [
+ 2.06599, -0.39161, -0.67439,
+ -0.50883, 2.27467, -0.76583,
+ -0.13961, -0.66121, 1.80081
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.sharpen": { }
+ },
+ {
+ "rpi.hdr":
+ {
+ "MultiExposureUnmerged":
+ {
+ "cadence": [ 1, 2 ],
+ "channel_map":
+ {
+ "short": 1,
+ "long": 2
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/src/ipa/rpi/vc4/data/ov5647_noir.json b/src/ipa/rpi/vc4/data/ov5647_noir.json
new file mode 100644
index 00000000..488b7119
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/ov5647_noir.json
@@ -0,0 +1,412 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 1024
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 21663,
+ "reference_gain": 1.0,
+ "reference_aperture": 1.0,
+ "reference_lux": 987,
+ "reference_Y": 8961
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 0,
+ "reference_slope": 4.25
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 401,
+ "slope": 0.05619
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "bayes": 0
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ],
+ "base_ev": 1.25
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.5,
+ "calibrations_Cr": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 1.105, 1.103, 1.093, 1.083, 1.071, 1.065, 1.065, 1.065, 1.066, 1.069, 1.072, 1.077, 1.084, 1.089, 1.093, 1.093,
+ 1.103, 1.096, 1.084, 1.072, 1.059, 1.051, 1.047, 1.047, 1.051, 1.053, 1.059, 1.067, 1.075, 1.082, 1.085, 1.086,
+ 1.096, 1.084, 1.072, 1.059, 1.051, 1.045, 1.039, 1.038, 1.039, 1.045, 1.049, 1.057, 1.063, 1.072, 1.081, 1.082,
+ 1.092, 1.075, 1.061, 1.052, 1.045, 1.039, 1.036, 1.035, 1.035, 1.039, 1.044, 1.049, 1.056, 1.063, 1.072, 1.081,
+ 1.092, 1.073, 1.058, 1.048, 1.043, 1.038, 1.035, 1.033, 1.033, 1.035, 1.039, 1.044, 1.051, 1.057, 1.069, 1.078,
+ 1.091, 1.068, 1.054, 1.045, 1.041, 1.038, 1.035, 1.032, 1.032, 1.032, 1.036, 1.041, 1.045, 1.055, 1.069, 1.078,
+ 1.091, 1.068, 1.052, 1.043, 1.041, 1.038, 1.035, 1.032, 1.031, 1.032, 1.034, 1.036, 1.043, 1.055, 1.069, 1.078,
+ 1.092, 1.068, 1.052, 1.047, 1.042, 1.041, 1.038, 1.035, 1.032, 1.032, 1.035, 1.039, 1.043, 1.055, 1.071, 1.079,
+ 1.092, 1.073, 1.057, 1.051, 1.047, 1.047, 1.044, 1.041, 1.038, 1.038, 1.039, 1.043, 1.051, 1.059, 1.076, 1.083,
+ 1.092, 1.081, 1.068, 1.058, 1.056, 1.056, 1.053, 1.052, 1.049, 1.048, 1.048, 1.051, 1.059, 1.066, 1.083, 1.085,
+ 1.091, 1.087, 1.081, 1.068, 1.065, 1.064, 1.062, 1.062, 1.061, 1.056, 1.056, 1.056, 1.064, 1.069, 1.084, 1.089,
+ 1.091, 1.089, 1.085, 1.079, 1.069, 1.068, 1.067, 1.067, 1.067, 1.063, 1.061, 1.063, 1.068, 1.069, 1.081, 1.092
+ ]
+ },
+ {
+ "ct": 5000,
+ "table":
+ [
+ 1.486, 1.484, 1.468, 1.449, 1.427, 1.403, 1.399, 1.399, 1.399, 1.404, 1.413, 1.433, 1.454, 1.473, 1.482, 1.488,
+ 1.484, 1.472, 1.454, 1.431, 1.405, 1.381, 1.365, 1.365, 1.367, 1.373, 1.392, 1.411, 1.438, 1.458, 1.476, 1.481,
+ 1.476, 1.458, 1.433, 1.405, 1.381, 1.361, 1.339, 1.334, 1.334, 1.346, 1.362, 1.391, 1.411, 1.438, 1.462, 1.474,
+ 1.471, 1.443, 1.417, 1.388, 1.361, 1.339, 1.321, 1.313, 1.313, 1.327, 1.346, 1.362, 1.391, 1.422, 1.453, 1.473,
+ 1.469, 1.439, 1.408, 1.377, 1.349, 1.321, 1.312, 1.299, 1.299, 1.311, 1.327, 1.348, 1.378, 1.415, 1.446, 1.468,
+ 1.468, 1.434, 1.402, 1.371, 1.341, 1.316, 1.299, 1.296, 1.295, 1.299, 1.314, 1.338, 1.371, 1.408, 1.441, 1.466,
+ 1.468, 1.434, 1.401, 1.371, 1.341, 1.316, 1.301, 1.296, 1.295, 1.297, 1.314, 1.338, 1.369, 1.408, 1.441, 1.465,
+ 1.469, 1.436, 1.401, 1.374, 1.348, 1.332, 1.315, 1.301, 1.301, 1.313, 1.324, 1.342, 1.372, 1.409, 1.442, 1.465,
+ 1.471, 1.444, 1.413, 1.388, 1.371, 1.348, 1.332, 1.323, 1.323, 1.324, 1.342, 1.362, 1.386, 1.418, 1.449, 1.467,
+ 1.473, 1.454, 1.431, 1.407, 1.388, 1.371, 1.359, 1.352, 1.351, 1.351, 1.362, 1.383, 1.404, 1.433, 1.462, 1.472,
+ 1.474, 1.461, 1.447, 1.424, 1.407, 1.394, 1.385, 1.381, 1.379, 1.381, 1.383, 1.401, 1.419, 1.444, 1.466, 1.481,
+ 1.474, 1.464, 1.455, 1.442, 1.421, 1.408, 1.403, 1.403, 1.403, 1.399, 1.402, 1.415, 1.432, 1.446, 1.467, 1.483
+ ]
+ },
+ {
+ "ct": 6500,
+ "table":
+ [
+ 1.567, 1.565, 1.555, 1.541, 1.525, 1.518, 1.518, 1.518, 1.521, 1.527, 1.532, 1.541, 1.551, 1.559, 1.567, 1.569,
+ 1.565, 1.557, 1.542, 1.527, 1.519, 1.515, 1.511, 1.516, 1.519, 1.524, 1.528, 1.533, 1.542, 1.553, 1.559, 1.562,
+ 1.561, 1.546, 1.532, 1.521, 1.518, 1.515, 1.511, 1.516, 1.519, 1.524, 1.528, 1.529, 1.533, 1.542, 1.554, 1.559,
+ 1.561, 1.539, 1.526, 1.524, 1.521, 1.521, 1.522, 1.524, 1.525, 1.531, 1.529, 1.529, 1.531, 1.538, 1.549, 1.558,
+ 1.559, 1.538, 1.526, 1.525, 1.524, 1.528, 1.534, 1.536, 1.536, 1.536, 1.532, 1.529, 1.531, 1.537, 1.548, 1.556,
+ 1.561, 1.537, 1.525, 1.524, 1.526, 1.532, 1.537, 1.539, 1.538, 1.537, 1.532, 1.529, 1.529, 1.537, 1.546, 1.556,
+ 1.561, 1.536, 1.524, 1.522, 1.525, 1.532, 1.538, 1.538, 1.537, 1.533, 1.528, 1.526, 1.527, 1.536, 1.546, 1.555,
+ 1.561, 1.537, 1.522, 1.521, 1.524, 1.531, 1.536, 1.537, 1.534, 1.529, 1.526, 1.522, 1.523, 1.534, 1.547, 1.555,
+ 1.561, 1.538, 1.524, 1.522, 1.526, 1.531, 1.535, 1.535, 1.534, 1.527, 1.524, 1.522, 1.522, 1.535, 1.549, 1.556,
+ 1.558, 1.543, 1.532, 1.526, 1.526, 1.529, 1.534, 1.535, 1.533, 1.526, 1.523, 1.522, 1.524, 1.537, 1.552, 1.557,
+ 1.555, 1.546, 1.541, 1.528, 1.527, 1.528, 1.531, 1.533, 1.531, 1.527, 1.522, 1.522, 1.526, 1.536, 1.552, 1.561,
+ 1.555, 1.547, 1.542, 1.538, 1.526, 1.526, 1.529, 1.531, 1.529, 1.528, 1.519, 1.519, 1.527, 1.531, 1.543, 1.561
+ ]
+ }
+ ],
+ "calibrations_Cb": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 1.684, 1.688, 1.691, 1.697, 1.709, 1.722, 1.735, 1.745, 1.747, 1.745, 1.731, 1.719, 1.709, 1.705, 1.699, 1.699,
+ 1.684, 1.689, 1.694, 1.708, 1.721, 1.735, 1.747, 1.762, 1.762, 1.758, 1.745, 1.727, 1.716, 1.707, 1.701, 1.699,
+ 1.684, 1.691, 1.704, 1.719, 1.734, 1.755, 1.772, 1.786, 1.789, 1.788, 1.762, 1.745, 1.724, 1.709, 1.702, 1.698,
+ 1.682, 1.694, 1.709, 1.729, 1.755, 1.773, 1.798, 1.815, 1.817, 1.808, 1.788, 1.762, 1.733, 1.714, 1.704, 1.699,
+ 1.682, 1.693, 1.713, 1.742, 1.772, 1.798, 1.815, 1.829, 1.831, 1.821, 1.807, 1.773, 1.742, 1.716, 1.703, 1.699,
+ 1.681, 1.693, 1.713, 1.742, 1.772, 1.799, 1.828, 1.839, 1.839, 1.828, 1.807, 1.774, 1.742, 1.715, 1.699, 1.695,
+ 1.679, 1.691, 1.712, 1.739, 1.771, 1.798, 1.825, 1.829, 1.831, 1.818, 1.801, 1.774, 1.738, 1.712, 1.695, 1.691,
+ 1.676, 1.685, 1.703, 1.727, 1.761, 1.784, 1.801, 1.817, 1.817, 1.801, 1.779, 1.761, 1.729, 1.706, 1.691, 1.684,
+ 1.669, 1.678, 1.692, 1.714, 1.741, 1.764, 1.784, 1.795, 1.795, 1.779, 1.761, 1.738, 1.713, 1.696, 1.683, 1.679,
+ 1.664, 1.671, 1.679, 1.693, 1.716, 1.741, 1.762, 1.769, 1.769, 1.753, 1.738, 1.713, 1.701, 1.687, 1.681, 1.676,
+ 1.661, 1.664, 1.671, 1.679, 1.693, 1.714, 1.732, 1.739, 1.739, 1.729, 1.708, 1.701, 1.685, 1.679, 1.676, 1.677,
+ 1.659, 1.661, 1.664, 1.671, 1.679, 1.693, 1.712, 1.714, 1.714, 1.708, 1.701, 1.687, 1.679, 1.672, 1.673, 1.677
+ ]
+ },
+ {
+ "ct": 5000,
+ "table":
+ [
+ 1.177, 1.183, 1.187, 1.191, 1.197, 1.206, 1.213, 1.215, 1.215, 1.215, 1.211, 1.204, 1.196, 1.191, 1.183, 1.182,
+ 1.179, 1.185, 1.191, 1.196, 1.206, 1.217, 1.224, 1.229, 1.229, 1.226, 1.221, 1.212, 1.202, 1.195, 1.188, 1.182,
+ 1.183, 1.191, 1.196, 1.206, 1.217, 1.229, 1.239, 1.245, 1.245, 1.245, 1.233, 1.221, 1.212, 1.199, 1.193, 1.187,
+ 1.183, 1.192, 1.201, 1.212, 1.229, 1.241, 1.252, 1.259, 1.259, 1.257, 1.245, 1.233, 1.217, 1.201, 1.194, 1.192,
+ 1.183, 1.192, 1.202, 1.219, 1.238, 1.252, 1.261, 1.269, 1.268, 1.261, 1.257, 1.241, 1.223, 1.204, 1.194, 1.191,
+ 1.182, 1.192, 1.202, 1.219, 1.239, 1.255, 1.266, 1.271, 1.271, 1.265, 1.258, 1.242, 1.223, 1.205, 1.192, 1.191,
+ 1.181, 1.189, 1.199, 1.218, 1.239, 1.254, 1.262, 1.268, 1.268, 1.258, 1.253, 1.241, 1.221, 1.204, 1.191, 1.187,
+ 1.179, 1.184, 1.193, 1.211, 1.232, 1.243, 1.254, 1.257, 1.256, 1.253, 1.242, 1.232, 1.216, 1.199, 1.187, 1.183,
+ 1.174, 1.179, 1.187, 1.202, 1.218, 1.232, 1.243, 1.246, 1.246, 1.239, 1.232, 1.218, 1.207, 1.191, 1.183, 1.179,
+ 1.169, 1.175, 1.181, 1.189, 1.202, 1.218, 1.229, 1.232, 1.232, 1.224, 1.218, 1.207, 1.199, 1.185, 1.181, 1.174,
+ 1.164, 1.168, 1.175, 1.179, 1.189, 1.201, 1.209, 1.213, 1.213, 1.209, 1.201, 1.198, 1.186, 1.181, 1.174, 1.173,
+ 1.161, 1.166, 1.171, 1.175, 1.179, 1.189, 1.197, 1.198, 1.198, 1.197, 1.196, 1.186, 1.182, 1.175, 1.173, 1.173
+ ]
+ },
+ {
+ "ct": 6500,
+ "table":
+ [
+ 1.166, 1.171, 1.173, 1.178, 1.187, 1.193, 1.201, 1.205, 1.205, 1.205, 1.199, 1.191, 1.184, 1.179, 1.174, 1.171,
+ 1.166, 1.172, 1.176, 1.184, 1.195, 1.202, 1.209, 1.216, 1.216, 1.213, 1.208, 1.201, 1.189, 1.182, 1.176, 1.171,
+ 1.166, 1.173, 1.183, 1.195, 1.202, 1.214, 1.221, 1.228, 1.229, 1.228, 1.221, 1.209, 1.201, 1.186, 1.179, 1.174,
+ 1.165, 1.174, 1.187, 1.201, 1.214, 1.223, 1.235, 1.241, 1.242, 1.241, 1.229, 1.221, 1.205, 1.188, 1.181, 1.177,
+ 1.165, 1.174, 1.189, 1.207, 1.223, 1.235, 1.242, 1.253, 1.252, 1.245, 1.241, 1.228, 1.211, 1.189, 1.181, 1.178,
+ 1.164, 1.173, 1.189, 1.207, 1.224, 1.238, 1.249, 1.255, 1.255, 1.249, 1.242, 1.228, 1.211, 1.191, 1.179, 1.176,
+ 1.163, 1.172, 1.187, 1.207, 1.223, 1.237, 1.245, 1.253, 1.252, 1.243, 1.237, 1.228, 1.207, 1.188, 1.176, 1.173,
+ 1.159, 1.167, 1.179, 1.199, 1.217, 1.227, 1.237, 1.241, 1.241, 1.237, 1.228, 1.217, 1.201, 1.184, 1.174, 1.169,
+ 1.156, 1.164, 1.172, 1.189, 1.205, 1.217, 1.226, 1.229, 1.229, 1.222, 1.217, 1.204, 1.192, 1.177, 1.171, 1.166,
+ 1.154, 1.159, 1.166, 1.177, 1.189, 1.205, 1.213, 1.216, 1.216, 1.209, 1.204, 1.192, 1.183, 1.172, 1.168, 1.162,
+ 1.152, 1.155, 1.161, 1.166, 1.177, 1.188, 1.195, 1.198, 1.199, 1.196, 1.187, 1.183, 1.173, 1.168, 1.163, 1.162,
+ 1.151, 1.154, 1.158, 1.162, 1.168, 1.177, 1.183, 1.184, 1.184, 1.184, 1.182, 1.172, 1.168, 1.165, 1.162, 1.161
+ ]
+ }
+ ],
+ "luminance_lut":
+ [
+ 2.236, 2.111, 1.912, 1.741, 1.579, 1.451, 1.379, 1.349, 1.349, 1.361, 1.411, 1.505, 1.644, 1.816, 2.034, 2.159,
+ 2.139, 1.994, 1.796, 1.625, 1.467, 1.361, 1.285, 1.248, 1.239, 1.265, 1.321, 1.408, 1.536, 1.703, 1.903, 2.087,
+ 2.047, 1.898, 1.694, 1.511, 1.373, 1.254, 1.186, 1.152, 1.142, 1.166, 1.226, 1.309, 1.441, 1.598, 1.799, 1.978,
+ 1.999, 1.824, 1.615, 1.429, 1.281, 1.179, 1.113, 1.077, 1.071, 1.096, 1.153, 1.239, 1.357, 1.525, 1.726, 1.915,
+ 1.976, 1.773, 1.563, 1.374, 1.222, 1.119, 1.064, 1.032, 1.031, 1.049, 1.099, 1.188, 1.309, 1.478, 1.681, 1.893,
+ 1.973, 1.756, 1.542, 1.351, 1.196, 1.088, 1.028, 1.011, 1.004, 1.029, 1.077, 1.169, 1.295, 1.459, 1.663, 1.891,
+ 1.973, 1.761, 1.541, 1.349, 1.193, 1.087, 1.031, 1.006, 1.006, 1.023, 1.075, 1.169, 1.298, 1.463, 1.667, 1.891,
+ 1.982, 1.789, 1.568, 1.373, 1.213, 1.111, 1.051, 1.029, 1.024, 1.053, 1.106, 1.199, 1.329, 1.495, 1.692, 1.903,
+ 2.015, 1.838, 1.621, 1.426, 1.268, 1.159, 1.101, 1.066, 1.068, 1.099, 1.166, 1.259, 1.387, 1.553, 1.751, 1.937,
+ 2.076, 1.911, 1.692, 1.507, 1.346, 1.236, 1.169, 1.136, 1.139, 1.174, 1.242, 1.349, 1.475, 1.641, 1.833, 2.004,
+ 2.193, 2.011, 1.798, 1.604, 1.444, 1.339, 1.265, 1.235, 1.237, 1.273, 1.351, 1.461, 1.598, 1.758, 1.956, 2.125,
+ 2.263, 2.154, 1.916, 1.711, 1.549, 1.432, 1.372, 1.356, 1.356, 1.383, 1.455, 1.578, 1.726, 1.914, 2.119, 2.211
+ ],
+ "sigma": 0.006,
+ "sigma_Cb": 0.00208
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 2500,
+ "ccm":
+ [
+ 1.70741, -0.05307, -0.65433,
+ -0.62822, 1.68836, -0.06014,
+ -0.04452, -1.87628, 2.92079
+ ]
+ },
+ {
+ "ct": 2803,
+ "ccm":
+ [
+ 1.74383, -0.18731, -0.55652,
+ -0.56491, 1.67772, -0.11281,
+ -0.01522, -1.60635, 2.62157
+ ]
+ },
+ {
+ "ct": 2912,
+ "ccm":
+ [
+ 1.75215, -0.22221, -0.52995,
+ -0.54568, 1.63522, -0.08954,
+ 0.02633, -1.56997, 2.54364
+ ]
+ },
+ {
+ "ct": 2914,
+ "ccm":
+ [
+ 1.72423, -0.28939, -0.43484,
+ -0.55188, 1.62925, -0.07737,
+ 0.01959, -1.28661, 2.26702
+ ]
+ },
+ {
+ "ct": 3605,
+ "ccm":
+ [
+ 1.80381, -0.43646, -0.36735,
+ -0.46505, 1.56814, -0.10309,
+ 0.00929, -1.00424, 1.99495
+ ]
+ },
+ {
+ "ct": 4540,
+ "ccm":
+ [
+ 1.85263, -0.46545, -0.38719,
+ -0.44136, 1.68443, -0.24307,
+ 0.04108, -0.85599, 1.81491
+ ]
+ },
+ {
+ "ct": 5699,
+ "ccm":
+ [
+ 1.98595, -0.63542, -0.35054,
+ -0.34623, 1.54146, -0.19522,
+ 0.00411, -0.70936, 1.70525
+ ]
+ },
+ {
+ "ct": 8625,
+ "ccm":
+ [
+ 2.21637, -0.56663, -0.64974,
+ -0.41133, 1.96625, -0.55492,
+ -0.02307, -0.83529, 1.85837
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.sharpen": { }
+ }
+ ]
+} \ No newline at end of file
diff --git a/src/ipa/rpi/vc4/data/ov64a40.json b/src/ipa/rpi/vc4/data/ov64a40.json
new file mode 100644
index 00000000..096f0b1e
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/ov64a40.json
@@ -0,0 +1,422 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 4096
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 17861,
+ "reference_gain": 2.0,
+ "reference_aperture": 1.0,
+ "reference_lux": 1073,
+ "reference_Y": 9022
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 0,
+ "reference_slope": 2.984
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 215,
+ "slope": 0.01121
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "priors": [
+ {
+ "lux": 0,
+ "prior":
+ [
+ 2000, 1.0,
+ 3000, 0.0,
+ 13000, 0.0
+ ]
+ },
+ {
+ "lux": 800,
+ "prior":
+ [
+ 2000, 0.0,
+ 6000, 2.0,
+ 13000, 2.0
+ ]
+ },
+ {
+ "lux": 1500,
+ "prior":
+ [
+ 2000, 0.0,
+ 4000, 1.0,
+ 6000, 6.0,
+ 6500, 7.0,
+ 7000, 1.0,
+ 13000, 1.0
+ ]
+ }
+ ],
+ "modes":
+ {
+ "auto":
+ {
+ "lo": 2500,
+ "hi": 8000
+ },
+ "incandescent":
+ {
+ "lo": 2500,
+ "hi": 3000
+ },
+ "tungsten":
+ {
+ "lo": 3000,
+ "hi": 3500
+ },
+ "fluorescent":
+ {
+ "lo": 4000,
+ "hi": 4700
+ },
+ "indoor":
+ {
+ "lo": 3000,
+ "hi": 5000
+ },
+ "daylight":
+ {
+ "lo": 5500,
+ "hi": 6500
+ }
+ },
+ "bayes": 1,
+ "ct_curve":
+ [
+ 2300.0, 1.0522, 0.4091,
+ 2700.0, 0.7884, 0.4327,
+ 3000.0, 0.7597, 0.4421,
+ 4000.0, 0.5972, 0.5404,
+ 4150.0, 0.5598, 0.5779,
+ 6500.0, 0.4388, 0.7582
+ ],
+ "sensitivity_r": 1.0,
+ "sensitivity_b": 1.0,
+ "transverse_pos": 0.0558,
+ "transverse_neg": 0.04278
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.8,
+ "calibrations_Cr": [
+ {
+ "ct": 6500,
+ "table":
+ [
+ 2.437, 2.415, 2.392, 2.378, 2.369, 2.353, 2.344, 2.336, 2.329, 2.325, 2.325, 2.325, 2.333, 2.344, 2.366, 2.381,
+ 2.434, 2.405, 2.386, 2.369, 2.361, 2.334, 2.314, 2.302, 2.295, 2.289, 2.289, 2.303, 2.327, 2.334, 2.356, 2.378,
+ 2.434, 2.405, 2.385, 2.363, 2.334, 2.314, 2.289, 2.277, 2.269, 2.262, 2.262, 2.283, 2.303, 2.328, 2.352, 2.375,
+ 2.434, 2.405, 2.385, 2.348, 2.315, 2.289, 2.277, 2.258, 2.251, 2.242, 2.249, 2.258, 2.283, 2.321, 2.352, 2.375,
+ 2.434, 2.413, 2.385, 2.343, 2.311, 2.282, 2.258, 2.251, 2.229, 2.233, 2.242, 2.251, 2.281, 2.321, 2.356, 2.375,
+ 2.437, 2.418, 2.388, 2.343, 2.311, 2.282, 2.251, 2.229, 2.221, 2.226, 2.233, 2.251, 2.281, 2.322, 2.361, 2.381,
+ 2.444, 2.422, 2.393, 2.351, 2.314, 2.284, 2.251, 2.227, 2.221, 2.227, 2.234, 2.256, 2.287, 2.326, 2.366, 2.389,
+ 2.445, 2.424, 2.395, 2.353, 2.316, 2.287, 2.266, 2.251, 2.228, 2.234, 2.251, 2.259, 2.289, 2.331, 2.371, 2.395,
+ 2.445, 2.424, 2.399, 2.364, 2.329, 2.308, 2.287, 2.266, 2.259, 2.254, 2.259, 2.283, 2.304, 2.343, 2.375, 2.395,
+ 2.445, 2.425, 2.407, 2.385, 2.364, 2.329, 2.308, 2.299, 2.291, 2.284, 2.284, 2.304, 2.335, 2.354, 2.381, 2.399,
+ 2.449, 2.427, 2.418, 2.407, 2.385, 2.364, 2.349, 2.338, 2.333, 2.326, 2.326, 2.335, 2.354, 2.374, 2.389, 2.408,
+ 2.458, 2.441, 2.427, 2.411, 2.403, 2.395, 2.391, 2.383, 2.375, 2.369, 2.369, 2.369, 2.369, 2.385, 2.408, 2.418
+ ]
+ }
+ ],
+ "calibrations_Cb": [
+ {
+ "ct": 6500,
+ "table":
+ [
+ 1.297, 1.297, 1.289, 1.289, 1.289, 1.291, 1.293, 1.294, 1.294, 1.294, 1.294, 1.296, 1.298, 1.304, 1.312, 1.313,
+ 1.297, 1.289, 1.286, 1.286, 1.287, 1.289, 1.292, 1.294, 1.294, 1.294, 1.294, 1.294, 1.296, 1.298, 1.306, 1.312,
+ 1.289, 1.286, 1.283, 1.283, 1.285, 1.287, 1.291, 1.294, 1.294, 1.292, 1.291, 1.289, 1.293, 1.294, 1.298, 1.304,
+ 1.283, 1.282, 1.279, 1.281, 1.282, 1.285, 1.287, 1.294, 1.294, 1.291, 1.289, 1.289, 1.289, 1.293, 1.294, 1.298,
+ 1.281, 1.279, 1.279, 1.279, 1.281, 1.283, 1.287, 1.292, 1.292, 1.291, 1.291, 1.289, 1.289, 1.291, 1.294, 1.297,
+ 1.279, 1.277, 1.277, 1.279, 1.281, 1.282, 1.286, 1.289, 1.291, 1.291, 1.291, 1.291, 1.289, 1.291, 1.293, 1.297,
+ 1.277, 1.275, 1.275, 1.278, 1.279, 1.281, 1.284, 1.287, 1.289, 1.291, 1.291, 1.291, 1.289, 1.289, 1.292, 1.297,
+ 1.277, 1.275, 1.274, 1.275, 1.277, 1.278, 1.279, 1.284, 1.285, 1.285, 1.286, 1.288, 1.289, 1.289, 1.292, 1.297,
+ 1.277, 1.272, 1.272, 1.274, 1.274, 1.277, 1.279, 1.282, 1.284, 1.284, 1.285, 1.286, 1.288, 1.289, 1.292, 1.297,
+ 1.277, 1.272, 1.272, 1.273, 1.274, 1.276, 1.279, 1.282, 1.284, 1.284, 1.286, 1.286, 1.288, 1.289, 1.293, 1.297,
+ 1.279, 1.272, 1.271, 1.272, 1.274, 1.276, 1.279, 1.283, 1.284, 1.284, 1.285, 1.286, 1.288, 1.291, 1.294, 1.299,
+ 1.281, 1.274, 1.271, 1.271, 1.273, 1.276, 1.278, 1.282, 1.284, 1.284, 1.285, 1.286, 1.286, 1.291, 1.295, 1.302
+ ]
+ }
+ ],
+ "luminance_lut":
+ [
+ 3.811, 3.611, 3.038, 2.632, 2.291, 2.044, 1.967, 1.957, 1.957, 1.957, 2.009, 2.222, 2.541, 2.926, 3.455, 3.652,
+ 3.611, 3.135, 2.636, 2.343, 2.044, 1.846, 1.703, 1.626, 1.626, 1.671, 1.796, 1.983, 2.266, 2.549, 3.007, 3.455,
+ 3.135, 2.781, 2.343, 2.044, 1.831, 1.554, 1.411, 1.337, 1.337, 1.379, 1.502, 1.749, 1.983, 2.266, 2.671, 3.007,
+ 2.903, 2.538, 2.149, 1.831, 1.554, 1.401, 1.208, 1.145, 1.145, 1.183, 1.339, 1.502, 1.749, 2.072, 2.446, 2.801,
+ 2.812, 2.389, 2.018, 1.684, 1.401, 1.208, 1.139, 1.028, 1.028, 1.109, 1.183, 1.339, 1.604, 1.939, 2.309, 2.723,
+ 2.799, 2.317, 1.948, 1.606, 1.327, 1.139, 1.028, 1.019, 1.001, 1.021, 1.109, 1.272, 1.531, 1.869, 2.246, 2.717,
+ 2.799, 2.317, 1.948, 1.606, 1.327, 1.139, 1.027, 1.006, 1.001, 1.007, 1.109, 1.272, 1.531, 1.869, 2.246, 2.717,
+ 2.799, 2.372, 1.997, 1.661, 1.378, 1.184, 1.118, 1.019, 1.012, 1.103, 1.158, 1.326, 1.589, 1.926, 2.302, 2.717,
+ 2.884, 2.507, 2.116, 1.795, 1.511, 1.361, 1.184, 1.118, 1.118, 1.158, 1.326, 1.461, 1.726, 2.056, 2.434, 2.799,
+ 3.083, 2.738, 2.303, 1.989, 1.783, 1.511, 1.361, 1.291, 1.291, 1.337, 1.461, 1.726, 1.942, 2.251, 2.657, 2.999,
+ 3.578, 3.083, 2.589, 2.303, 1.989, 1.783, 1.637, 1.563, 1.563, 1.613, 1.743, 1.942, 2.251, 2.537, 2.999, 3.492,
+ 3.764, 3.578, 2.999, 2.583, 2.237, 1.986, 1.913, 1.905, 1.905, 1.905, 1.962, 2.196, 2.525, 2.932, 3.492, 3.659
+ ],
+ "sigma": 0.005,
+ "sigma_Cb": 0.005
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 2300,
+ "ccm":
+ [
+ 1.77644, -0.14825, -0.62819,
+ -0.25816, 1.66348, -0.40532,
+ -0.21633, -1.95132, 3.16765
+ ]
+ },
+ {
+ "ct": 2700,
+ "ccm":
+ [
+ 1.53605, 0.03047, -0.56652,
+ -0.27159, 1.78525, -0.51366,
+ -0.13581, -1.22128, 2.35709
+ ]
+ },
+ {
+ "ct": 3000,
+ "ccm":
+ [
+ 1.72928, -0.18819, -0.54108,
+ -0.44398, 2.04756, -0.60358,
+ -0.13203, -0.94711, 2.07913
+ ]
+ },
+ {
+ "ct": 4000,
+ "ccm":
+ [
+ 1.69895, -0.23055, -0.46841,
+ -0.33934, 1.80391, -0.46456,
+ -0.13902, -0.75385, 1.89287
+ ]
+ },
+ {
+ "ct": 4150,
+ "ccm":
+ [
+ 2.08494, -0.68698, -0.39796,
+ -0.37928, 1.78795, -0.40867,
+ -0.11537, -0.74686, 1.86223
+ ]
+ },
+ {
+ "ct": 6500,
+ "ccm":
+ [
+ 1.69813, -0.27304, -0.42509,
+ -0.23364, 1.87586, -0.64221,
+ -0.07587, -0.62348, 1.69935
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.sharpen": { }
+ },
+ {
+ "rpi.af":
+ {
+ "ranges":
+ {
+ "normal":
+ {
+ "min": 0.0,
+ "max": 12.0,
+ "default": 1.0
+ },
+ "macro":
+ {
+ "min": 3.0,
+ "max": 15.0,
+ "default": 4.0
+ }
+ },
+ "speeds":
+ {
+ "normal":
+ {
+ "step_coarse": 1.0,
+ "step_fine": 0.25,
+ "contrast_ratio": 0.75,
+ "pdaf_gain": -0.02,
+ "pdaf_squelch": 0.125,
+ "max_slew": 2.0,
+ "pdaf_frames": 0,
+ "dropout_frames": 0,
+ "step_frames": 4
+ }
+ },
+ "conf_epsilon": 8,
+ "conf_thresh": 16,
+ "conf_clip": 512,
+ "skip_frames": 5,
+ "map": [ 0.0, 0, 15.0, 1023 ]
+ }
+ }
+ ]
+} \ No newline at end of file
diff --git a/src/ipa/rpi/vc4/data/ov9281_mono.json b/src/ipa/rpi/vc4/data/ov9281_mono.json
new file mode 100644
index 00000000..a9d05a01
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/ov9281_mono.json
@@ -0,0 +1,136 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 4096
+ }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 2000,
+ "reference_gain": 1.0,
+ "reference_aperture": 1.0,
+ "reference_lux": 800,
+ "reference_Y": 20000
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 0,
+ "reference_slope": 2.5
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.agc":
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 4, 4, 4, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 15000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 3.0, 4.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 30000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.4,
+ 1000, 0.4
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "n_iter": 0,
+ "luminance_strength": 1.0,
+ "corner_strength": 1.5
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 0,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ }
+ ]
+} \ No newline at end of file
diff --git a/src/ipa/rpi/vc4/data/se327m12.json b/src/ipa/rpi/vc4/data/se327m12.json
new file mode 100644
index 00000000..948169db
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/se327m12.json
@@ -0,0 +1,432 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 3840
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 6873,
+ "reference_gain": 1.0,
+ "reference_aperture": 1.0,
+ "reference_lux": 800,
+ "reference_Y": 12293
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 0,
+ "reference_slope": 1.986
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 207,
+ "slope": 0.00539
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "priors": [
+ {
+ "lux": 0,
+ "prior":
+ [
+ 2000, 1.0,
+ 3000, 0.0,
+ 13000, 0.0
+ ]
+ },
+ {
+ "lux": 800,
+ "prior":
+ [
+ 2000, 0.0,
+ 6000, 2.0,
+ 13000, 2.0
+ ]
+ },
+ {
+ "lux": 1500,
+ "prior":
+ [
+ 2000, 0.0,
+ 4000, 1.0,
+ 6000, 6.0,
+ 6500, 7.0,
+ 7000, 1.0,
+ 13000, 1.0
+ ]
+ }
+ ],
+ "modes":
+ {
+ "auto":
+ {
+ "lo": 2500,
+ "hi": 8000
+ },
+ "incandescent":
+ {
+ "lo": 2500,
+ "hi": 3000
+ },
+ "tungsten":
+ {
+ "lo": 3000,
+ "hi": 3500
+ },
+ "fluorescent":
+ {
+ "lo": 4000,
+ "hi": 4700
+ },
+ "indoor":
+ {
+ "lo": 3000,
+ "hi": 5000
+ },
+ "daylight":
+ {
+ "lo": 5500,
+ "hi": 6500
+ },
+ "cloudy":
+ {
+ "lo": 7000,
+ "hi": 8600
+ }
+ },
+ "bayes": 1,
+ "ct_curve":
+ [
+ 2900.0, 0.9217, 0.3657,
+ 3600.0, 0.7876, 0.4651,
+ 4600.0, 0.6807, 0.5684,
+ 5800.0, 0.5937, 0.6724,
+ 8100.0, 0.5447, 0.7403
+ ],
+ "sensitivity_r": 1.0,
+ "sensitivity_b": 1.0,
+ "transverse_pos": 0.0162,
+ "transverse_neg": 0.0204
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.5,
+ "calibrations_Cr": [
+ {
+ "ct": 4000,
+ "table":
+ [
+ 1.481, 1.471, 1.449, 1.429, 1.416, 1.404, 1.394, 1.389, 1.389, 1.389, 1.392, 1.397, 1.404, 1.416, 1.429, 1.437,
+ 1.472, 1.456, 1.436, 1.418, 1.405, 1.394, 1.389, 1.384, 1.382, 1.382, 1.386, 1.388, 1.398, 1.407, 1.422, 1.429,
+ 1.465, 1.443, 1.426, 1.411, 1.397, 1.389, 1.383, 1.377, 1.377, 1.377, 1.379, 1.384, 1.388, 1.398, 1.411, 1.422,
+ 1.462, 1.441, 1.423, 1.409, 1.395, 1.385, 1.379, 1.376, 1.374, 1.374, 1.375, 1.379, 1.384, 1.394, 1.407, 1.418,
+ 1.461, 1.439, 1.421, 1.407, 1.394, 1.385, 1.381, 1.376, 1.373, 1.373, 1.373, 1.376, 1.381, 1.389, 1.403, 1.415,
+ 1.461, 1.439, 1.419, 1.404, 1.392, 1.384, 1.379, 1.376, 1.373, 1.372, 1.374, 1.375, 1.379, 1.389, 1.401, 1.413,
+ 1.461, 1.438, 1.419, 1.402, 1.389, 1.383, 1.377, 1.375, 1.373, 1.372, 1.372, 1.375, 1.381, 1.388, 1.401, 1.414,
+ 1.462, 1.438, 1.419, 1.403, 1.391, 1.381, 1.377, 1.374, 1.373, 1.373, 1.374, 1.376, 1.381, 1.389, 1.401, 1.414,
+ 1.462, 1.441, 1.423, 1.405, 1.392, 1.383, 1.377, 1.374, 1.373, 1.372, 1.373, 1.376, 1.382, 1.391, 1.402, 1.414,
+ 1.465, 1.444, 1.424, 1.407, 1.393, 1.382, 1.378, 1.373, 1.369, 1.369, 1.372, 1.375, 1.381, 1.389, 1.402, 1.417,
+ 1.469, 1.449, 1.427, 1.413, 1.396, 1.384, 1.381, 1.375, 1.371, 1.371, 1.373, 1.377, 1.385, 1.393, 1.407, 1.422,
+ 1.474, 1.456, 1.436, 1.419, 1.407, 1.391, 1.383, 1.379, 1.377, 1.377, 1.378, 1.381, 1.391, 1.404, 1.422, 1.426
+ ]
+ },
+ {
+ "ct": 5000,
+ "table":
+ [
+ 1.742, 1.721, 1.689, 1.661, 1.639, 1.623, 1.613, 1.609, 1.607, 1.606, 1.609, 1.617, 1.626, 1.641, 1.665, 1.681,
+ 1.728, 1.703, 1.672, 1.645, 1.631, 1.614, 1.602, 1.599, 1.596, 1.597, 1.601, 1.608, 1.618, 1.631, 1.653, 1.671,
+ 1.713, 1.691, 1.658, 1.635, 1.618, 1.606, 1.595, 1.591, 1.588, 1.588, 1.591, 1.601, 1.608, 1.624, 1.641, 1.658,
+ 1.707, 1.681, 1.651, 1.627, 1.613, 1.599, 1.591, 1.585, 1.583, 1.584, 1.587, 1.591, 1.601, 1.615, 1.633, 1.655,
+ 1.699, 1.672, 1.644, 1.622, 1.606, 1.593, 1.586, 1.581, 1.579, 1.581, 1.583, 1.587, 1.597, 1.611, 1.631, 1.652,
+ 1.697, 1.665, 1.637, 1.617, 1.601, 1.589, 1.584, 1.579, 1.577, 1.578, 1.581, 1.585, 1.597, 1.607, 1.627, 1.652,
+ 1.697, 1.662, 1.634, 1.613, 1.599, 1.591, 1.583, 1.578, 1.576, 1.576, 1.579, 1.586, 1.597, 1.607, 1.628, 1.653,
+ 1.697, 1.662, 1.633, 1.613, 1.598, 1.589, 1.582, 1.578, 1.576, 1.577, 1.582, 1.589, 1.598, 1.611, 1.635, 1.655,
+ 1.701, 1.666, 1.636, 1.616, 1.602, 1.589, 1.583, 1.578, 1.577, 1.581, 1.583, 1.591, 1.601, 1.617, 1.639, 1.659,
+ 1.708, 1.671, 1.641, 1.618, 1.603, 1.591, 1.584, 1.581, 1.578, 1.581, 1.585, 1.594, 1.604, 1.622, 1.646, 1.666,
+ 1.714, 1.681, 1.648, 1.622, 1.608, 1.599, 1.591, 1.584, 1.583, 1.584, 1.589, 1.599, 1.614, 1.629, 1.653, 1.673,
+ 1.719, 1.691, 1.659, 1.631, 1.618, 1.606, 1.596, 1.591, 1.591, 1.593, 1.599, 1.608, 1.623, 1.642, 1.665, 1.681
+ ]
+ }
+ ],
+ "calibrations_Cb": [
+ {
+ "ct": 4000,
+ "table":
+ [
+ 2.253, 2.267, 2.289, 2.317, 2.342, 2.359, 2.373, 2.381, 2.381, 2.378, 2.368, 2.361, 2.344, 2.337, 2.314, 2.301,
+ 2.262, 2.284, 2.314, 2.335, 2.352, 2.371, 2.383, 2.391, 2.393, 2.391, 2.381, 2.368, 2.361, 2.342, 2.322, 2.308,
+ 2.277, 2.303, 2.321, 2.346, 2.364, 2.381, 2.391, 2.395, 2.397, 2.397, 2.395, 2.381, 2.367, 2.354, 2.332, 2.321,
+ 2.277, 2.304, 2.327, 2.349, 2.369, 2.388, 2.393, 2.396, 2.396, 2.398, 2.396, 2.391, 2.376, 2.359, 2.339, 2.328,
+ 2.279, 2.311, 2.327, 2.354, 2.377, 2.389, 2.393, 2.397, 2.397, 2.398, 2.395, 2.393, 2.382, 2.363, 2.344, 2.332,
+ 2.282, 2.311, 2.329, 2.354, 2.377, 2.386, 2.396, 2.396, 2.395, 2.396, 2.397, 2.394, 2.383, 2.367, 2.346, 2.333,
+ 2.283, 2.314, 2.333, 2.353, 2.375, 2.389, 2.394, 2.395, 2.395, 2.395, 2.396, 2.394, 2.386, 2.368, 2.354, 2.336,
+ 2.287, 2.309, 2.331, 2.352, 2.373, 2.386, 2.394, 2.395, 2.395, 2.396, 2.396, 2.394, 2.384, 2.371, 2.354, 2.339,
+ 2.289, 2.307, 2.326, 2.347, 2.369, 2.385, 2.392, 2.397, 2.398, 2.398, 2.397, 2.392, 2.383, 2.367, 2.352, 2.337,
+ 2.286, 2.303, 2.322, 2.342, 2.361, 2.379, 2.389, 2.394, 2.397, 2.398, 2.396, 2.389, 2.381, 2.366, 2.346, 2.332,
+ 2.284, 2.291, 2.312, 2.329, 2.351, 2.372, 2.381, 2.389, 2.393, 2.394, 2.389, 2.385, 2.374, 2.362, 2.338, 2.325,
+ 2.283, 2.288, 2.305, 2.319, 2.339, 2.365, 2.374, 2.381, 2.384, 2.386, 2.385, 2.379, 2.368, 2.342, 2.325, 2.318
+ ]
+ },
+ {
+ "ct": 5000,
+ "table":
+ [
+ 1.897, 1.919, 1.941, 1.969, 1.989, 2.003, 2.014, 2.019, 2.019, 2.017, 2.014, 2.008, 1.999, 1.988, 1.968, 1.944,
+ 1.914, 1.932, 1.957, 1.982, 1.998, 2.014, 2.023, 2.029, 2.031, 2.029, 2.022, 2.014, 2.006, 1.995, 1.976, 1.955,
+ 1.925, 1.951, 1.974, 1.996, 2.013, 2.027, 2.035, 2.039, 2.039, 2.038, 2.035, 2.026, 2.015, 2.002, 1.984, 1.963,
+ 1.932, 1.958, 1.986, 2.007, 2.024, 2.034, 2.041, 2.041, 2.045, 2.045, 2.042, 2.033, 2.023, 2.009, 1.995, 1.971,
+ 1.942, 1.964, 1.994, 2.012, 2.029, 2.038, 2.043, 2.046, 2.047, 2.046, 2.045, 2.039, 2.029, 2.014, 1.997, 1.977,
+ 1.946, 1.974, 1.999, 2.015, 2.031, 2.041, 2.046, 2.047, 2.048, 2.047, 2.044, 2.041, 2.031, 2.019, 1.999, 1.978,
+ 1.948, 1.975, 2.002, 2.018, 2.031, 2.041, 2.046, 2.047, 2.048, 2.048, 2.045, 2.041, 2.029, 2.019, 1.998, 1.978,
+ 1.948, 1.973, 2.002, 2.018, 2.029, 2.042, 2.045, 2.048, 2.048, 2.048, 2.044, 2.037, 2.027, 2.014, 1.993, 1.978,
+ 1.945, 1.969, 1.998, 2.015, 2.028, 2.037, 2.045, 2.046, 2.047, 2.044, 2.039, 2.033, 2.022, 2.008, 1.989, 1.971,
+ 1.939, 1.964, 1.991, 2.011, 2.024, 2.032, 2.036, 2.042, 2.042, 2.039, 2.035, 2.024, 2.012, 1.998, 1.977, 1.964,
+ 1.932, 1.953, 1.981, 2.006, 2.016, 2.024, 2.028, 2.031, 2.034, 2.031, 2.024, 2.015, 2.005, 1.989, 1.966, 1.955,
+ 1.928, 1.944, 1.973, 1.999, 2.007, 2.016, 2.019, 2.025, 2.026, 2.025, 2.017, 2.008, 1.997, 1.975, 1.958, 1.947
+ ]
+ }
+ ],
+ "luminance_lut":
+ [
+ 1.877, 1.597, 1.397, 1.269, 1.191, 1.131, 1.093, 1.078, 1.071, 1.069, 1.086, 1.135, 1.221, 1.331, 1.474, 1.704,
+ 1.749, 1.506, 1.334, 1.229, 1.149, 1.088, 1.058, 1.053, 1.051, 1.046, 1.053, 1.091, 1.163, 1.259, 1.387, 1.587,
+ 1.661, 1.451, 1.295, 1.195, 1.113, 1.061, 1.049, 1.048, 1.047, 1.049, 1.049, 1.066, 1.124, 1.211, 1.333, 1.511,
+ 1.615, 1.411, 1.267, 1.165, 1.086, 1.052, 1.047, 1.047, 1.047, 1.049, 1.052, 1.056, 1.099, 1.181, 1.303, 1.471,
+ 1.576, 1.385, 1.252, 1.144, 1.068, 1.049, 1.044, 1.044, 1.045, 1.049, 1.053, 1.054, 1.083, 1.163, 1.283, 1.447,
+ 1.561, 1.373, 1.245, 1.135, 1.064, 1.049, 1.044, 1.044, 1.044, 1.046, 1.048, 1.054, 1.073, 1.153, 1.271, 1.432,
+ 1.571, 1.377, 1.242, 1.137, 1.066, 1.055, 1.052, 1.051, 1.051, 1.049, 1.047, 1.048, 1.068, 1.148, 1.271, 1.427,
+ 1.582, 1.396, 1.259, 1.156, 1.085, 1.068, 1.059, 1.054, 1.049, 1.045, 1.041, 1.043, 1.074, 1.157, 1.284, 1.444,
+ 1.623, 1.428, 1.283, 1.178, 1.105, 1.074, 1.069, 1.063, 1.056, 1.048, 1.046, 1.051, 1.094, 1.182, 1.311, 1.473,
+ 1.691, 1.471, 1.321, 1.213, 1.135, 1.088, 1.073, 1.069, 1.063, 1.059, 1.053, 1.071, 1.129, 1.222, 1.351, 1.521,
+ 1.808, 1.543, 1.371, 1.253, 1.174, 1.118, 1.085, 1.072, 1.067, 1.064, 1.071, 1.106, 1.176, 1.274, 1.398, 1.582,
+ 1.969, 1.666, 1.447, 1.316, 1.223, 1.166, 1.123, 1.094, 1.089, 1.097, 1.118, 1.163, 1.239, 1.336, 1.471, 1.681
+ ],
+ "sigma": 0.00218,
+ "sigma_Cb": 0.00194
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 2900,
+ "ccm":
+ [
+ 1.44924, -0.12935, -0.31989,
+ -0.65839, 1.95441, -0.29602,
+ 0.18344, -1.22282, 2.03938
+ ]
+ },
+ {
+ "ct": 3000,
+ "ccm":
+ [
+ 1.38736, 0.07714, -0.46451,
+ -0.59691, 1.84335, -0.24644,
+ 0.10092, -1.30441, 2.20349
+ ]
+ },
+ {
+ "ct": 3600,
+ "ccm":
+ [
+ 1.51261, -0.27921, -0.23339,
+ -0.55129, 1.83241, -0.28111,
+ 0.11649, -0.93195, 1.81546
+ ]
+ },
+ {
+ "ct": 4600,
+ "ccm":
+ [
+ 1.47082, -0.18523, -0.28559,
+ -0.48923, 1.95126, -0.46203,
+ 0.07951, -0.83987, 1.76036
+ ]
+ },
+ {
+ "ct": 5800,
+ "ccm":
+ [
+ 1.57294, -0.36229, -0.21065,
+ -0.42272, 1.80305, -0.38032,
+ 0.03671, -0.66862, 1.63191
+ ]
+ },
+ {
+ "ct": 8100,
+ "ccm":
+ [
+ 1.58803, -0.09912, -0.48891,
+ -0.42594, 2.22303, -0.79709,
+ -0.00621, -0.90516, 1.91137
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.sharpen":
+ {
+ "threshold": 2.0,
+ "strength": 0.5,
+ "limit": 0.5
+ }
+ }
+ ]
+} \ No newline at end of file
diff --git a/src/ipa/rpi/vc4/data/uncalibrated.json b/src/ipa/rpi/vc4/data/uncalibrated.json
new file mode 100644
index 00000000..cdc56b32
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/uncalibrated.json
@@ -0,0 +1,131 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 4096
+ }
+ },
+ {
+ "rpi.awb":
+ {
+ "use_derivatives": 0,
+ "bayes": 0
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 4, 4, 4, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 15000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 3.0, 4.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 30000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.4,
+ 1000, 0.4
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 4000,
+ "ccm":
+ [
+ 2.0, -1.0, 0.0,
+ -0.5, 2.0, -0.5,
+ 0, -1.0, 2.0
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 0,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ }
+ ]
+} \ No newline at end of file
diff --git a/src/ipa/rpi/vc4/meson.build b/src/ipa/rpi/vc4/meson.build
new file mode 100644
index 00000000..590e9197
--- /dev/null
+++ b/src/ipa/rpi/vc4/meson.build
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: CC0-1.0
+
+ipa_name = 'ipa_rpi_vc4'
+
+vc4_ipa_deps = [
+ libcamera_private,
+ libatomic,
+]
+
+vc4_ipa_libs = [
+ rpi_ipa_cam_helper_lib,
+ rpi_ipa_common_lib,
+ rpi_ipa_controller_lib
+]
+
+vc4_ipa_includes = [
+ ipa_includes,
+ libipa_includes,
+]
+
+vc4_ipa_sources = files([
+ 'vc4.cpp',
+])
+
+vc4_ipa_includes += include_directories('..')
+
+mod = shared_module(ipa_name,
+ [vc4_ipa_sources, libcamera_generated_ipa_headers],
+ name_prefix : '',
+ include_directories : vc4_ipa_includes,
+ dependencies : vc4_ipa_deps,
+ link_with : libipa,
+ link_whole : vc4_ipa_libs,
+ install : true,
+ install_dir : ipa_install_dir)
+
+if ipa_sign_module
+ custom_target(ipa_name + '.so.sign',
+ input : mod,
+ output : ipa_name + '.so.sign',
+ command : [ipa_sign, ipa_priv_key, '@INPUT@', '@OUTPUT@'],
+ install : false,
+ build_by_default : true)
+endif
+
+subdir('data')
+
+ipa_names += ipa_name
diff --git a/src/ipa/rpi/vc4/vc4.cpp b/src/ipa/rpi/vc4/vc4.cpp
new file mode 100644
index 00000000..ba43e474
--- /dev/null
+++ b/src/ipa/rpi/vc4/vc4.cpp
@@ -0,0 +1,597 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019-2021, Raspberry Pi Ltd
+ *
+ * Raspberry Pi VC4/BCM2835 ISP IPA.
+ */
+
+#include <string.h>
+#include <sys/mman.h>
+
+#include <linux/bcm2835-isp.h>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/span.h>
+#include <libcamera/control_ids.h>
+#include <libcamera/ipa/ipa_module_info.h>
+
+#include "common/ipa_base.h"
+#include "controller/af_status.h"
+#include "controller/agc_algorithm.h"
+#include "controller/alsc_status.h"
+#include "controller/awb_status.h"
+#include "controller/black_level_status.h"
+#include "controller/ccm_status.h"
+#include "controller/contrast_status.h"
+#include "controller/denoise_algorithm.h"
+#include "controller/denoise_status.h"
+#include "controller/dpc_status.h"
+#include "controller/geq_status.h"
+#include "controller/lux_status.h"
+#include "controller/noise_status.h"
+#include "controller/sharpen_status.h"
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(IPARPI)
+
+namespace ipa::RPi {
+
+class IpaVc4 final : public IpaBase
+{
+public:
+ IpaVc4()
+ : IpaBase(), lsTable_(nullptr)
+ {
+ }
+
+ ~IpaVc4()
+ {
+ if (lsTable_)
+ munmap(lsTable_, MaxLsGridSize);
+ }
+
+private:
+ int32_t platformInit(const InitParams &params, InitResult *result) override;
+ int32_t platformStart(const ControlList &controls, StartResult *result) override;
+ int32_t platformConfigure(const ConfigParams &params, ConfigResult *result) override;
+
+ void platformPrepareIsp(const PrepareParams &params, RPiController::Metadata &rpiMetadata) override;
+ RPiController::StatisticsPtr platformProcessStats(Span<uint8_t> mem) override;
+
+ void handleControls(const ControlList &controls) override;
+ bool validateIspControls();
+
+ void applyAWB(const struct AwbStatus *awbStatus, ControlList &ctrls);
+ void applyDG(const struct AgcPrepareStatus *dgStatus, ControlList &ctrls);
+ void applyCCM(const struct CcmStatus *ccmStatus, ControlList &ctrls);
+ void applyBlackLevel(const struct BlackLevelStatus *blackLevelStatus, ControlList &ctrls);
+ void applyGamma(const struct ContrastStatus *contrastStatus, ControlList &ctrls);
+ void applyGEQ(const struct GeqStatus *geqStatus, ControlList &ctrls);
+ void applyDenoise(const struct DenoiseStatus *denoiseStatus, ControlList &ctrls);
+ void applySharpen(const struct SharpenStatus *sharpenStatus, ControlList &ctrls);
+ void applyDPC(const struct DpcStatus *dpcStatus, ControlList &ctrls);
+ void applyLS(const struct AlscStatus *lsStatus, ControlList &ctrls);
+ void applyAF(const struct AfStatus *afStatus, ControlList &lensCtrls);
+ void resampleTable(uint16_t dest[], const std::vector<double> &src, int destW, int destH);
+
+ /* VC4 ISP controls. */
+ ControlInfoMap ispCtrls_;
+
+ /* LS table allocation passed in from the pipeline handler. */
+ SharedFD lsTableHandle_;
+ void *lsTable_;
+};
+
+int32_t IpaVc4::platformInit([[maybe_unused]] const InitParams &params, [[maybe_unused]] InitResult *result)
+{
+ const std::string &target = controller_.getTarget();
+
+ if (target != "bcm2835") {
+ LOG(IPARPI, Error)
+ << "Tuning data file target returned \"" << target << "\""
+ << ", expected \"bcm2835\"";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int32_t IpaVc4::platformStart([[maybe_unused]] const ControlList &controls,
+ [[maybe_unused]] StartResult *result)
+{
+ return 0;
+}
+
+int32_t IpaVc4::platformConfigure(const ConfigParams &params, [[maybe_unused]] ConfigResult *result)
+{
+ ispCtrls_ = params.ispControls;
+ if (!validateIspControls()) {
+ LOG(IPARPI, Error) << "ISP control validation failed.";
+ return -1;
+ }
+
+ /* Store the lens shading table pointer and handle if available. */
+ if (params.lsTableHandle.isValid()) {
+ /* Remove any previous table, if there was one. */
+ if (lsTable_) {
+ munmap(lsTable_, MaxLsGridSize);
+ lsTable_ = nullptr;
+ }
+
+ /* Map the LS table buffer into user space. */
+ lsTableHandle_ = std::move(params.lsTableHandle);
+ if (lsTableHandle_.isValid()) {
+ lsTable_ = mmap(nullptr, MaxLsGridSize, PROT_READ | PROT_WRITE,
+ MAP_SHARED, lsTableHandle_.get(), 0);
+
+ if (lsTable_ == MAP_FAILED) {
+ LOG(IPARPI, Error) << "dmaHeap mmap failure for LS table.";
+ lsTable_ = nullptr;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void IpaVc4::platformPrepareIsp([[maybe_unused]] const PrepareParams &params,
+ RPiController::Metadata &rpiMetadata)
+{
+ ControlList ctrls(ispCtrls_);
+
+ /* Lock the metadata buffer to avoid constant locks/unlocks. */
+ std::unique_lock<RPiController::Metadata> lock(rpiMetadata);
+
+ AwbStatus *awbStatus = rpiMetadata.getLocked<AwbStatus>("awb.status");
+ if (awbStatus)
+ applyAWB(awbStatus, ctrls);
+
+ CcmStatus *ccmStatus = rpiMetadata.getLocked<CcmStatus>("ccm.status");
+ if (ccmStatus)
+ applyCCM(ccmStatus, ctrls);
+
+ AgcPrepareStatus *dgStatus = rpiMetadata.getLocked<AgcPrepareStatus>("agc.prepare_status");
+ if (dgStatus)
+ applyDG(dgStatus, ctrls);
+
+ AlscStatus *lsStatus = rpiMetadata.getLocked<AlscStatus>("alsc.status");
+ if (lsStatus)
+ applyLS(lsStatus, ctrls);
+
+ ContrastStatus *contrastStatus = rpiMetadata.getLocked<ContrastStatus>("contrast.status");
+ if (contrastStatus)
+ applyGamma(contrastStatus, ctrls);
+
+ BlackLevelStatus *blackLevelStatus = rpiMetadata.getLocked<BlackLevelStatus>("black_level.status");
+ if (blackLevelStatus)
+ applyBlackLevel(blackLevelStatus, ctrls);
+
+ GeqStatus *geqStatus = rpiMetadata.getLocked<GeqStatus>("geq.status");
+ if (geqStatus)
+ applyGEQ(geqStatus, ctrls);
+
+ DenoiseStatus *denoiseStatus = rpiMetadata.getLocked<DenoiseStatus>("denoise.status");
+ if (denoiseStatus)
+ applyDenoise(denoiseStatus, ctrls);
+
+ SharpenStatus *sharpenStatus = rpiMetadata.getLocked<SharpenStatus>("sharpen.status");
+ if (sharpenStatus)
+ applySharpen(sharpenStatus, ctrls);
+
+ DpcStatus *dpcStatus = rpiMetadata.getLocked<DpcStatus>("dpc.status");
+ if (dpcStatus)
+ applyDPC(dpcStatus, ctrls);
+
+ const AfStatus *afStatus = rpiMetadata.getLocked<AfStatus>("af.status");
+ if (afStatus) {
+ ControlList lensctrls(lensCtrls_);
+ applyAF(afStatus, lensctrls);
+ if (!lensctrls.empty())
+ setLensControls.emit(lensctrls);
+ }
+
+ if (!ctrls.empty())
+ setIspControls.emit(ctrls);
+}
+
+RPiController::StatisticsPtr IpaVc4::platformProcessStats(Span<uint8_t> mem)
+{
+ using namespace RPiController;
+
+ const bcm2835_isp_stats *stats = reinterpret_cast<bcm2835_isp_stats *>(mem.data());
+ StatisticsPtr statistics = std::make_shared<Statistics>(Statistics::AgcStatsPos::PreWb,
+ Statistics::ColourStatsPos::PostLsc);
+ const Controller::HardwareConfig &hw = controller_.getHardwareConfig();
+ unsigned int i;
+
+ /* RGB histograms are not used, so do not populate them. */
+ statistics->yHist = RPiController::Histogram(stats->hist[0].g_hist,
+ hw.numHistogramBins);
+
+ /* All region sums are based on a 16-bit normalised pipeline bit-depth. */
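+ /*
+ * E.g. with the 16-bit normalisation target, a hypothetical 13-bit
+ * pipeline gives scale = 3, i.e. the sums are multiplied by 8.
+ */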
+ unsigned int scale = Statistics::NormalisationFactorPow2 - hw.pipelineWidth;
+
+ statistics->awbRegions.init(hw.awbRegions);
+ for (i = 0; i < statistics->awbRegions.numRegions(); i++)
+ statistics->awbRegions.set(i, { { stats->awb_stats[i].r_sum << scale,
+ stats->awb_stats[i].g_sum << scale,
+ stats->awb_stats[i].b_sum << scale },
+ stats->awb_stats[i].counted,
+ stats->awb_stats[i].notcounted });
+
+ RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>(
+ controller_.getAlgorithm("agc"));
+ if (!agc) {
+ LOG(IPARPI, Debug) << "No AGC algorithm - not copying statistics";
+ statistics->agcRegions.init(0);
+ } else {
+ statistics->agcRegions.init(hw.agcRegions);
+ const std::vector<double> &weights = agc->getWeights();
+ for (i = 0; i < statistics->agcRegions.numRegions(); i++) {
+ uint64_t rSum = (stats->agc_stats[i].r_sum << scale) * weights[i];
+ uint64_t gSum = (stats->agc_stats[i].g_sum << scale) * weights[i];
+ uint64_t bSum = (stats->agc_stats[i].b_sum << scale) * weights[i];
+ uint32_t counted = stats->agc_stats[i].counted * weights[i];
+ uint32_t notcounted = stats->agc_stats[i].notcounted * weights[i];
+ statistics->agcRegions.set(i, { { rSum, gSum, bSum },
+ counted,
+ notcounted });
+ }
+ }
+
+ statistics->focusRegions.init(hw.focusRegions);
+ for (i = 0; i < statistics->focusRegions.numRegions(); i++)
+ statistics->focusRegions.set(i, { stats->focus_stats[i].contrast_val[1][1] / 1000,
+ stats->focus_stats[i].contrast_val_num[1][1],
+ stats->focus_stats[i].contrast_val_num[1][0] });
+
+ if (statsMetadataOutput_) {
+ Span<const uint8_t> statsSpan(reinterpret_cast<const uint8_t *>(stats),
+ sizeof(bcm2835_isp_stats));
+ libcameraMetadata_.set(controls::rpi::Bcm2835StatsOutput, statsSpan);
+ }
+
+ return statistics;
+}
+
+void IpaVc4::handleControls(const ControlList &controls)
+{
+ static const std::map<int32_t, RPiController::DenoiseMode> DenoiseModeTable = {
+ { controls::draft::NoiseReductionModeOff, RPiController::DenoiseMode::Off },
+ { controls::draft::NoiseReductionModeFast, RPiController::DenoiseMode::ColourFast },
+ { controls::draft::NoiseReductionModeHighQuality, RPiController::DenoiseMode::ColourHighQuality },
+ { controls::draft::NoiseReductionModeMinimal, RPiController::DenoiseMode::ColourOff },
+ { controls::draft::NoiseReductionModeZSL, RPiController::DenoiseMode::ColourHighQuality },
+ };
+
+ for (auto const &ctrl : controls) {
+ switch (ctrl.first) {
+ case controls::draft::NOISE_REDUCTION_MODE: {
+ RPiController::DenoiseAlgorithm *sdn = dynamic_cast<RPiController::DenoiseAlgorithm *>(
+ controller_.getAlgorithm("SDN"));
+ /* Some platforms may have a combined "denoise" algorithm instead. */
+ if (!sdn)
+ sdn = dynamic_cast<RPiController::DenoiseAlgorithm *>(
+ controller_.getAlgorithm("denoise"));
+ if (!sdn) {
+ LOG(IPARPI, Warning)
+ << "Could not set NOISE_REDUCTION_MODE - no SDN algorithm";
+ return;
+ }
+
+ int32_t idx = ctrl.second.get<int32_t>();
+ auto mode = DenoiseModeTable.find(idx);
+ if (mode != DenoiseModeTable.end())
+ sdn->setMode(mode->second);
+ break;
+ }
+ }
+ }
+}
+
+bool IpaVc4::validateIspControls()
+{
+ static const uint32_t ctrls[] = {
+ V4L2_CID_RED_BALANCE,
+ V4L2_CID_BLUE_BALANCE,
+ V4L2_CID_DIGITAL_GAIN,
+ V4L2_CID_USER_BCM2835_ISP_CC_MATRIX,
+ V4L2_CID_USER_BCM2835_ISP_GAMMA,
+ V4L2_CID_USER_BCM2835_ISP_BLACK_LEVEL,
+ V4L2_CID_USER_BCM2835_ISP_GEQ,
+ V4L2_CID_USER_BCM2835_ISP_DENOISE,
+ V4L2_CID_USER_BCM2835_ISP_SHARPEN,
+ V4L2_CID_USER_BCM2835_ISP_DPC,
+ V4L2_CID_USER_BCM2835_ISP_LENS_SHADING,
+ V4L2_CID_USER_BCM2835_ISP_CDN,
+ };
+
+ for (auto c : ctrls) {
+ if (ispCtrls_.find(c) == ispCtrls_.end()) {
+ LOG(IPARPI, Error) << "Unable to find ISP control "
+ << utils::hex(c);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+void IpaVc4::applyAWB(const struct AwbStatus *awbStatus, ControlList &ctrls)
+{
+ LOG(IPARPI, Debug) << "Applying WB R: " << awbStatus->gainR << " B: "
+ << awbStatus->gainB;
+
+ ctrls.set(V4L2_CID_RED_BALANCE,
+ static_cast<int32_t>(awbStatus->gainR * 1000));
+ ctrls.set(V4L2_CID_BLUE_BALANCE,
+ static_cast<int32_t>(awbStatus->gainB * 1000));
+}
+
+void IpaVc4::applyDG(const struct AgcPrepareStatus *dgStatus, ControlList &ctrls)
+{
+ ctrls.set(V4L2_CID_DIGITAL_GAIN,
+ static_cast<int32_t>(dgStatus->digitalGain * 1000));
+}
+
+void IpaVc4::applyCCM(const struct CcmStatus *ccmStatus, ControlList &ctrls)
+{
+ bcm2835_isp_custom_ccm ccm;
+
+ for (int i = 0; i < 9; i++) {
+ ccm.ccm.ccm[i / 3][i % 3].den = 1000;
+ ccm.ccm.ccm[i / 3][i % 3].num = 1000 * ccmStatus->matrix[i];
+ }
+
+ ccm.enabled = 1;
+ ccm.ccm.offsets[0] = ccm.ccm.offsets[1] = ccm.ccm.offsets[2] = 0;
+
+ ControlValue c(Span<const uint8_t>{ reinterpret_cast<uint8_t *>(&ccm),
+ sizeof(ccm) });
+ ctrls.set(V4L2_CID_USER_BCM2835_ISP_CC_MATRIX, c);
+}
+
+void IpaVc4::applyBlackLevel(const struct BlackLevelStatus *blackLevelStatus, ControlList &ctrls)
+{
+ bcm2835_isp_black_level blackLevel;
+
+ blackLevel.enabled = 1;
+ blackLevel.black_level_r = blackLevelStatus->blackLevelR;
+ blackLevel.black_level_g = blackLevelStatus->blackLevelG;
+ blackLevel.black_level_b = blackLevelStatus->blackLevelB;
+
+ ControlValue c(Span<const uint8_t>{ reinterpret_cast<uint8_t *>(&blackLevel),
+ sizeof(blackLevel) });
+ ctrls.set(V4L2_CID_USER_BCM2835_ISP_BLACK_LEVEL, c);
+}
+
+void IpaVc4::applyGamma(const struct ContrastStatus *contrastStatus, ControlList &ctrls)
+{
+ const unsigned int numGammaPoints = controller_.getHardwareConfig().numGammaPoints;
+ struct bcm2835_isp_gamma gamma;
+
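+ /*
+ * The hardware gamma points are spaced more densely near black: 1024
+ * apart up to 16384, 2048 apart up to 32768 and 4096 apart beyond,
+ * with the final point pinned to 65535. These are the same x values
+ * listed in the gamma_curve tables of the tuning files.
+ */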
+ for (unsigned int i = 0; i < numGammaPoints - 1; i++) {
+ int x = i < 16 ? i * 1024
+ : (i < 24 ? (i - 16) * 2048 + 16384
+ : (i - 24) * 4096 + 32768);
+ gamma.x[i] = x;
+ gamma.y[i] = std::min<uint16_t>(65535, contrastStatus->gammaCurve.eval(x));
+ }
+
+ gamma.x[numGammaPoints - 1] = 65535;
+ gamma.y[numGammaPoints - 1] = 65535;
+ gamma.enabled = 1;
+
+ ControlValue c(Span<const uint8_t>{ reinterpret_cast<uint8_t *>(&gamma),
+ sizeof(gamma) });
+ ctrls.set(V4L2_CID_USER_BCM2835_ISP_GAMMA, c);
+}
+
+void IpaVc4::applyGEQ(const struct GeqStatus *geqStatus, ControlList &ctrls)
+{
+ bcm2835_isp_geq geq;
+
+ geq.enabled = 1;
+ geq.offset = geqStatus->offset;
+ geq.slope.den = 1000;
+ geq.slope.num = 1000 * geqStatus->slope;
+
+ ControlValue c(Span<const uint8_t>{ reinterpret_cast<uint8_t *>(&geq),
+ sizeof(geq) });
+ ctrls.set(V4L2_CID_USER_BCM2835_ISP_GEQ, c);
+}
+
+void IpaVc4::applyDenoise(const struct DenoiseStatus *denoiseStatus, ControlList &ctrls)
+{
+ using RPiController::DenoiseMode;
+
+ bcm2835_isp_denoise denoise;
+ DenoiseMode mode = static_cast<DenoiseMode>(denoiseStatus->mode);
+
+ denoise.enabled = mode != DenoiseMode::Off;
+ denoise.constant = denoiseStatus->noiseConstant;
+ denoise.slope.num = 1000 * denoiseStatus->noiseSlope;
+ denoise.slope.den = 1000;
+ denoise.strength.num = 1000 * denoiseStatus->strength;
+ denoise.strength.den = 1000;
+
+ /* Set the CDN mode to match the SDN operating mode. */
+ bcm2835_isp_cdn cdn;
+ switch (mode) {
+ case DenoiseMode::ColourFast:
+ cdn.enabled = 1;
+ cdn.mode = CDN_MODE_FAST;
+ break;
+ case DenoiseMode::ColourHighQuality:
+ cdn.enabled = 1;
+ cdn.mode = CDN_MODE_HIGH_QUALITY;
+ break;
+ default:
+ cdn.enabled = 0;
+ }
+
+ ControlValue c(Span<const uint8_t>{ reinterpret_cast<uint8_t *>(&denoise),
+ sizeof(denoise) });
+ ctrls.set(V4L2_CID_USER_BCM2835_ISP_DENOISE, c);
+
+ c = ControlValue(Span<const uint8_t>{ reinterpret_cast<uint8_t *>(&cdn),
+ sizeof(cdn) });
+ ctrls.set(V4L2_CID_USER_BCM2835_ISP_CDN, c);
+}
+
+void IpaVc4::applySharpen(const struct SharpenStatus *sharpenStatus, ControlList &ctrls)
+{
+ bcm2835_isp_sharpen sharpen;
+
+ sharpen.enabled = 1;
+ sharpen.threshold.num = 1000 * sharpenStatus->threshold;
+ sharpen.threshold.den = 1000;
+ sharpen.strength.num = 1000 * sharpenStatus->strength;
+ sharpen.strength.den = 1000;
+ sharpen.limit.num = 1000 * sharpenStatus->limit;
+ sharpen.limit.den = 1000;
+
+ ControlValue c(Span<const uint8_t>{ reinterpret_cast<uint8_t *>(&sharpen),
+ sizeof(sharpen) });
+ ctrls.set(V4L2_CID_USER_BCM2835_ISP_SHARPEN, c);
+}
+
+void IpaVc4::applyDPC(const struct DpcStatus *dpcStatus, ControlList &ctrls)
+{
+ bcm2835_isp_dpc dpc;
+
+ dpc.enabled = 1;
+ dpc.strength = dpcStatus->strength;
+
+ ControlValue c(Span<const uint8_t>{ reinterpret_cast<uint8_t *>(&dpc),
+ sizeof(dpc) });
+ ctrls.set(V4L2_CID_USER_BCM2835_ISP_DPC, c);
+}
+
+void IpaVc4::applyLS(const struct AlscStatus *lsStatus, ControlList &ctrls)
+{
+ /*
+ * Program lens shading tables into pipeline.
+ * Choose smallest cell size that won't exceed 63x48 cells.
+ */
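+ /*
+ * For example, a hypothetical 1920x1080 mode rejects the 16-pixel
+ * cell size (120x68 cells) and settles on 32 pixels, giving 60x34
+ * cells.
+ */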
+ const int cellSizes[] = { 16, 32, 64, 128, 256 };
+ unsigned int numCells = std::size(cellSizes);
+ unsigned int i, w, h, cellSize;
+ for (i = 0; i < numCells; i++) {
+ cellSize = cellSizes[i];
+ w = (mode_.width + cellSize - 1) / cellSize;
+ h = (mode_.height + cellSize - 1) / cellSize;
+ if (w < 64 && h <= 48)
+ break;
+ }
+
+ if (i == numCells) {
+ LOG(IPARPI, Error) << "Cannot find cell size";
+ return;
+ }
+
+ /* We're going to supply corner sampled tables, 16 bit samples. */
+ w++, h++;
+ bcm2835_isp_lens_shading ls = {
+ .enabled = 1,
+ .grid_cell_size = cellSize,
+ .grid_width = w,
+ .grid_stride = w,
+ .grid_height = h,
+ /* .dmabuf will be filled in by pipeline handler. */
+ .dmabuf = 0,
+ .ref_transform = 0,
+ .corner_sampled = 1,
+ .gain_format = GAIN_FORMAT_U4P10
+ };
+
+ if (!lsTable_ || w * h * 4 * sizeof(uint16_t) > MaxLsGridSize) {
+ LOG(IPARPI, Error) << "Do not have a correctly allocate lens shading table!";
+ return;
+ }
+
+ if (lsStatus) {
+ /* Format will be u4.10 */
+ uint16_t *grid = static_cast<uint16_t *>(lsTable_);
+
+ resampleTable(grid, lsStatus->r, w, h);
+ resampleTable(grid + w * h, lsStatus->g, w, h);
+ memcpy(grid + 2 * w * h, grid + w * h, w * h * sizeof(uint16_t));
+ resampleTable(grid + 3 * w * h, lsStatus->b, w, h);
+ }
+
+ ControlValue c(Span<const uint8_t>{ reinterpret_cast<uint8_t *>(&ls),
+ sizeof(ls) });
+ ctrls.set(V4L2_CID_USER_BCM2835_ISP_LENS_SHADING, c);
+}
+
+void IpaVc4::applyAF(const struct AfStatus *afStatus, ControlList &lensCtrls)
+{
+ if (afStatus->lensSetting) {
+ ControlValue v(afStatus->lensSetting.value());
+ lensCtrls.set(V4L2_CID_FOCUS_ABSOLUTE, v);
+ }
+}
+
+/*
+ * Resamples a 16x12 table with central sampling to destW x destH with corner
+ * sampling.
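+ * Output samples are placed at x = -0.5 + i * 16 / (destW - 1), spanning the
+ * outer corners of a source grid whose cell centres sit at integer positions
+ * 0 to 15 (and similarly for y over 0 to 11).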
+ */
+void IpaVc4::resampleTable(uint16_t dest[], const std::vector<double> &src,
+ int destW, int destH)
+{
+ /*
+ * Precalculate and cache the x sampling locations and phases to
+ * save recomputing them on every row.
+ */
+ assert(destW > 1 && destH > 1 && destW <= 64);
+ int xLo[64], xHi[64];
+ double xf[64];
+ double x = -0.5, xInc = 16.0 / (destW - 1);
+ for (int i = 0; i < destW; i++, x += xInc) {
+ xLo[i] = floor(x);
+ xf[i] = x - xLo[i];
+ xHi[i] = xLo[i] < 15 ? xLo[i] + 1 : 15;
+ xLo[i] = xLo[i] > 0 ? xLo[i] : 0;
+ }
+
+ /* Now march over the output table generating the new values. */
+ double y = -0.5, yInc = 12.0 / (destH - 1);
+ for (int j = 0; j < destH; j++, y += yInc) {
+ int yLo = floor(y);
+ double yf = y - yLo;
+ int yHi = yLo < 11 ? yLo + 1 : 11;
+ yLo = yLo > 0 ? yLo : 0;
+ double const *rowAbove = src.data() + yLo * 16;
+ double const *rowBelow = src.data() + yHi * 16;
+ for (int i = 0; i < destW; i++) {
+ double above = rowAbove[xLo[i]] * (1 - xf[i]) + rowAbove[xHi[i]] * xf[i];
+ double below = rowBelow[xLo[i]] * (1 - xf[i]) + rowBelow[xHi[i]] * xf[i];
+ int result = floor(1024 * (above * (1 - yf) + below * yf) + .5);
+ *(dest++) = result > 16383 ? 16383 : result; /* want u4.10 */
+ }
+ }
+}
+
+} /* namespace ipa::RPi */
+
+/*
+ * External IPA module interface
+ */
+extern "C" {
+const struct IPAModuleInfo ipaModuleInfo = {
+ IPA_MODULE_API_VERSION,
+ 1,
+ "rpi/vc4",
+ "rpi/vc4",
+};
+
+IPAInterface *ipaCreate()
+{
+ return new ipa::RPi::IpaVc4();
+}
+
+} /* extern "C" */
+
+} /* namespace libcamera */
diff --git a/src/ipa/simple/black_level.cpp b/src/ipa/simple/black_level.cpp
new file mode 100644
index 00000000..53b4c040
--- /dev/null
+++ b/src/ipa/simple/black_level.cpp
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Red Hat Inc.
+ *
+ * black level handling
+ */
+
+#include "black_level.h"
+
+#include <numeric>
+
+#include <libcamera/base/log.h>
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(IPASoftBL)
+
+/**
+ * \class BlackLevel
+ * \brief Object providing black point level for software ISP
+ *
+ * Black level can be provided in hardware tuning files or, if no tuning file is
+ * available for the given hardware, guessed automatically, with less accuracy.
+ * As tuning files are not yet implemented for software ISP, BlackLevel
+ * currently provides only guessed black levels.
+ *
+ * This class tracks the black level as a property of the underlying
+ * hardware, not as a means of enhancing a particular scene or image.
+ *
+ * The class is supposed to be instantiated for the given camera stream.
+ * The black level can be retrieved using the BlackLevel::get() method. It is
+ * initially 0 and may change when updated using the BlackLevel::update()
+ * method.
+ */
+
+BlackLevel::BlackLevel()
+ : blackLevel_(255), blackLevelSet_(false)
+{
+}
+
+/**
+ * \brief Return the current black level
+ *
+ * \return The black level, in the range from 0 (minimum) to 255 (maximum).
+ * If the black level couldn't be determined yet, return 0.
+ */
+unsigned int BlackLevel::get() const
+{
+ return blackLevelSet_ ? blackLevel_ : 0;
+}
+
+/**
+ * \brief Update black level from the provided histogram
+ * \param[in] yHistogram The histogram to be used for updating black level
+ *
+ * The black level is a property of the given hardware, not of the image. It
+ * is updated only if it has not yet been set or if a lower value is observed.
+ */
+void BlackLevel::update(SwIspStats::Histogram &yHistogram)
+{
+ /*
+ * The constant is selected to be "good enough", not overly conservative or
+ * aggressive. There is no magic about the given value.
+ */
+ constexpr float ignoredPercentage_ = 0.02;
+ const unsigned int total =
+ std::accumulate(begin(yHistogram), end(yHistogram), 0);
+ const unsigned int pixelThreshold = ignoredPercentage_ * total;
+ const unsigned int histogramRatio = 256 / SwIspStats::kYHistogramSize;
+ const unsigned int currentBlackIdx = blackLevel_ / histogramRatio;
+
+ for (unsigned int i = 0, seen = 0;
+ i < currentBlackIdx && i < SwIspStats::kYHistogramSize;
+ i++) {
+ seen += yHistogram[i];
+ if (seen >= pixelThreshold) {
+ blackLevel_ = i * histogramRatio;
+ blackLevelSet_ = true;
+ LOG(IPASoftBL, Debug)
+ << "Auto-set black level: "
+ << i << "/" << SwIspStats::kYHistogramSize
+ << " (" << 100 * (seen - yHistogram[i]) / total << "% below, "
+ << 100 * seen / total << "% at or below)";
+ break;
+ }
+ }
+}
+} /* namespace libcamera */
diff --git a/src/ipa/simple/black_level.h b/src/ipa/simple/black_level.h
new file mode 100644
index 00000000..25094217
--- /dev/null
+++ b/src/ipa/simple/black_level.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Red Hat Inc.
+ *
+ * black level handling
+ */
+
+#pragma once
+
+#include <array>
+
+#include "libcamera/internal/software_isp/swisp_stats.h"
+
+namespace libcamera {
+
+class BlackLevel
+{
+public:
+ BlackLevel();
+ unsigned int get() const;
+ void update(SwIspStats::Histogram &yHistogram);
+
+private:
+ unsigned int blackLevel_;
+ bool blackLevelSet_;
+};
+
+} /* namespace libcamera */
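A minimal usage sketch for the new BlackLevel class, based only on the
interface declared above; the surrounding soft ISP wiring and the source of
the luma histogram are illustrative assumptions, not part of this patch:

    #include "black_level.h"

    using namespace libcamera;

    /* Call whenever the software ISP produces a fresh set of statistics. */
    void applyBlackLevel(BlackLevel &blackLevel, SwIspStats::Histogram &yHistogram)
    {
            /* update() may lower the stored level; it is never raised again. */
            blackLevel.update(yHistogram);

            /* 0 until the first update succeeds, then in the 0..255 range. */
            unsigned int level = blackLevel.get();

            /* ... pass 'level' on, e.g. as the debayer black level ... */
            (void)level;
    }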
diff --git a/src/ipa/simple/data/meson.build b/src/ipa/simple/data/meson.build
new file mode 100644
index 00000000..92795ee4
--- /dev/null
+++ b/src/ipa/simple/data/meson.build
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: CC0-1.0
+
+conf_files = files([
+ 'uncalibrated.yaml',
+])
+
+# The install_dir must match the name from the IPAModuleInfo
+install_data(conf_files,
+ install_dir : ipa_data_dir / 'simple',
+ install_tag : 'runtime')
diff --git a/src/ipa/simple/data/uncalibrated.yaml b/src/ipa/simple/data/uncalibrated.yaml
new file mode 100644
index 00000000..ff981a1a
--- /dev/null
+++ b/src/ipa/simple/data/uncalibrated.yaml
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: CC0-1.0
+%YAML 1.1
+---
+version: 1
+...
diff --git a/src/ipa/simple/meson.build b/src/ipa/simple/meson.build
new file mode 100644
index 00000000..44b5f1d7
--- /dev/null
+++ b/src/ipa/simple/meson.build
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: CC0-1.0
+
+ipa_name = 'ipa_soft_simple'
+
+soft_simple_sources = files([
+ 'soft_simple.cpp',
+ 'black_level.cpp',
+])
+
+mod = shared_module(ipa_name,
+ [soft_simple_sources, libcamera_generated_ipa_headers],
+ name_prefix : '',
+ include_directories : [ipa_includes, libipa_includes],
+ dependencies : libcamera_private,
+ link_with : libipa,
+ install : true,
+ install_dir : ipa_install_dir)
+
+if ipa_sign_module
+ custom_target(ipa_name + '.so.sign',
+ input : mod,
+ output : ipa_name + '.so.sign',
+ command : [ipa_sign, ipa_priv_key, '@INPUT@', '@OUTPUT@'],
+ install : false,
+ build_by_default : true)
+endif
+
+subdir('data')
+
+ipa_names += ipa_name
diff --git a/src/ipa/simple/soft_simple.cpp b/src/ipa/simple/soft_simple.cpp
new file mode 100644
index 00000000..a5bb2bbf
--- /dev/null
+++ b/src/ipa/simple/soft_simple.cpp
@@ -0,0 +1,403 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ *
+ * Simple Software Image Processing Algorithm module
+ */
+
+#include <sys/mman.h>
+
+#include <linux/v4l2-controls.h>
+
+#include <libcamera/base/file.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/shared_fd.h>
+
+#include <libcamera/control_ids.h>
+#include <libcamera/controls.h>
+
+#include <libcamera/ipa/ipa_interface.h>
+#include <libcamera/ipa/ipa_module_info.h>
+#include <libcamera/ipa/soft_ipa_interface.h>
+
+#include "libcamera/internal/software_isp/debayer_params.h"
+#include "libcamera/internal/software_isp/swisp_stats.h"
+#include "libcamera/internal/yaml_parser.h"
+
+#include "libipa/camera_sensor_helper.h"
+
+#include "black_level.h"
+
+namespace libcamera {
+LOG_DEFINE_CATEGORY(IPASoft)
+
+namespace ipa::soft {
+
+/*
+ * The number of bins to use for the optimal exposure calculations.
+ */
+static constexpr unsigned int kExposureBinsCount = 5;
+
+/*
+ * The exposure is optimal when the mean sample value of the histogram is
+ * in the middle of the range.
+ */
+static constexpr float kExposureOptimal = kExposureBinsCount / 2.0;
+
+/*
+ * The below value implements the hysteresis for the exposure adjustment.
+ * It is small enough to have the exposure close to the optimal, and is big
+ * enough to prevent the exposure from wobbling around the optimal value.
+ */
+static constexpr float kExposureSatisfactory = 0.2;
+
+class IPASoftSimple : public ipa::soft::IPASoftInterface
+{
+public:
+ IPASoftSimple()
+ : params_(nullptr), stats_(nullptr), blackLevel_(BlackLevel()),
+ ignoreUpdates_(0)
+ {
+ }
+
+ ~IPASoftSimple();
+
+ int init(const IPASettings &settings,
+ const SharedFD &fdStats,
+ const SharedFD &fdParams,
+ const ControlInfoMap &sensorInfoMap) override;
+ int configure(const ControlInfoMap &sensorInfoMap) override;
+
+ int start() override;
+ void stop() override;
+
+ void processStats(const ControlList &sensorControls) override;
+
+private:
+ void updateExposure(double exposureMSV);
+
+ DebayerParams *params_;
+ SwIspStats *stats_;
+ std::unique_ptr<CameraSensorHelper> camHelper_;
+ ControlInfoMap sensorInfoMap_;
+ BlackLevel blackLevel_;
+
+ int32_t exposureMin_, exposureMax_;
+ int32_t exposure_;
+ double againMin_, againMax_, againMinStep_;
+ double again_;
+ unsigned int ignoreUpdates_;
+};
+
+IPASoftSimple::~IPASoftSimple()
+{
+ if (stats_)
+ munmap(stats_, sizeof(SwIspStats));
+ if (params_)
+ munmap(params_, sizeof(DebayerParams));
+}
+
+int IPASoftSimple::init(const IPASettings &settings,
+ const SharedFD &fdStats,
+ const SharedFD &fdParams,
+ const ControlInfoMap &sensorInfoMap)
+{
+ camHelper_ = CameraSensorHelperFactoryBase::create(settings.sensorModel);
+ if (!camHelper_) {
+ LOG(IPASoft, Warning)
+ << "Failed to create camera sensor helper for "
+ << settings.sensorModel;
+ }
+
+ /* Load the tuning data file */
+ File file(settings.configurationFile);
+ if (!file.open(File::OpenModeFlag::ReadOnly)) {
+ int ret = file.error();
+ LOG(IPASoft, Error)
+ << "Failed to open configuration file "
+ << settings.configurationFile << ": " << strerror(-ret);
+ return ret;
+ }
+
+ std::unique_ptr<libcamera::YamlObject> data = YamlParser::parse(file);
+ if (!data)
+ return -EINVAL;
+
+ /* \todo Use the IPA configuration file for real. */
+ unsigned int version = (*data)["version"].get<uint32_t>(0);
+ LOG(IPASoft, Debug) << "Tuning file version " << version;
+
+ params_ = nullptr;
+ stats_ = nullptr;
+
+ if (!fdStats.isValid()) {
+ LOG(IPASoft, Error) << "Invalid Statistics handle";
+ return -ENODEV;
+ }
+
+ if (!fdParams.isValid()) {
+ LOG(IPASoft, Error) << "Invalid Parameters handle";
+ return -ENODEV;
+ }
+
+ {
+ void *mem = mmap(nullptr, sizeof(DebayerParams), PROT_WRITE,
+ MAP_SHARED, fdParams.get(), 0);
+ if (mem == MAP_FAILED) {
+ LOG(IPASoft, Error) << "Unable to map Parameters";
+ return -errno;
+ }
+
+ params_ = static_cast<DebayerParams *>(mem);
+ }
+
+ {
+ void *mem = mmap(nullptr, sizeof(SwIspStats), PROT_READ,
+ MAP_SHARED, fdStats.get(), 0);
+ if (mem == MAP_FAILED) {
+ LOG(IPASoft, Error) << "Unable to map Statistics";
+ return -errno;
+ }
+
+ stats_ = static_cast<SwIspStats *>(mem);
+ }
+
+ /*
+ * Check if the sensor driver supports the controls required by the
+ * Soft IPA.
+ * Don't save the min and max control values yet, as e.g. the limits
+ * for V4L2_CID_EXPOSURE depend on the configured sensor resolution.
+ */
+ if (sensorInfoMap.find(V4L2_CID_EXPOSURE) == sensorInfoMap.end()) {
+ LOG(IPASoft, Error) << "Don't have exposure control";
+ return -EINVAL;
+ }
+
+ if (sensorInfoMap.find(V4L2_CID_ANALOGUE_GAIN) == sensorInfoMap.end()) {
+ LOG(IPASoft, Error) << "Don't have gain control";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int IPASoftSimple::configure(const ControlInfoMap &sensorInfoMap)
+{
+ sensorInfoMap_ = sensorInfoMap;
+
+ const ControlInfo &exposureInfo = sensorInfoMap_.find(V4L2_CID_EXPOSURE)->second;
+ const ControlInfo &gainInfo = sensorInfoMap_.find(V4L2_CID_ANALOGUE_GAIN)->second;
+
+ exposureMin_ = exposureInfo.min().get<int32_t>();
+ exposureMax_ = exposureInfo.max().get<int32_t>();
+ if (!exposureMin_) {
+ LOG(IPASoft, Warning) << "Minimum exposure is zero, that can't be linear";
+ exposureMin_ = 1;
+ }
+
+ int32_t againMin = gainInfo.min().get<int32_t>();
+ int32_t againMax = gainInfo.max().get<int32_t>();
+
+ if (camHelper_) {
+ againMin_ = camHelper_->gain(againMin);
+ againMax_ = camHelper_->gain(againMax);
+ againMinStep_ = (againMax_ - againMin_) / 100.0;
+ } else {
+ /*
+ * The camera sensor gain (g) is usually not equal to the value written
+ * into the gain register (x). But the way the AGC algorithm changes the
+ * gain value to bring the total exposure closer to the optimum assumes
+ * that g(x) is not too far from a linear function. If the minimal gain
+ * is 0, g(x) is likely to be far from linear, e.g.
+ * g(x) = a / (b * x + c). To avoid unexpected changes to the gain by
+ * the AGC algorithm (abrupt near one edge, and very small near the
+ * other) we limit the range of the gain values used.
+ */
+ againMax_ = againMax;
+ againMin_ = againMin;
+ if (!againMin) {
+ LOG(IPASoft, Warning)
+ << "Minimum gain is zero, that can't be linear";
+ againMin_ = std::min(100, againMin / 2 + againMax / 2);
+ }
+ againMinStep_ = 1.0;
+ }
+
+ LOG(IPASoft, Info) << "Exposure " << exposureMin_ << "-" << exposureMax_
+ << ", gain " << againMin_ << "-" << againMax_
+ << " (" << againMinStep_ << ")";
+
+ return 0;
+}
+
+int IPASoftSimple::start()
+{
+ return 0;
+}
+
+void IPASoftSimple::stop()
+{
+}
+
+void IPASoftSimple::processStats(const ControlList &sensorControls)
+{
+ /*
+ * Calculate red and blue gains for AWB.
+ * Clamp the maximum gain at 4.0; this also avoids division by zero.
+ */
+ if (stats_->sumR_ <= stats_->sumG_ / 4)
+ params_->gainR = 1024;
+ else
+ params_->gainR = 256 * stats_->sumG_ / stats_->sumR_;
+
+ if (stats_->sumB_ <= stats_->sumG_ / 4)
+ params_->gainB = 1024;
+ else
+ params_->gainB = 256 * stats_->sumG_ / stats_->sumB_;
+
+ /* Green gain and gamma values are fixed */
+ params_->gainG = 256;
+ params_->gamma = 0.5;
+
+ if (ignoreUpdates_ > 0)
+ blackLevel_.update(stats_->yHistogram);
+ params_->blackLevel = blackLevel_.get();
+
+ setIspParams.emit();
+
+ /* \todo Switch to the libipa/algorithm.h API someday. */
+
+ /*
+ * AE / AGC: use a two-frame delay to make sure that the exposure and
+ * gain values set have been applied to the camera sensor.
+ * \todo This could be handled better with DelayedControls.
+ */
+ if (ignoreUpdates_ > 0) {
+ --ignoreUpdates_;
+ return;
+ }
+
+ /*
+ * Calculate Mean Sample Value (MSV) according to formula from:
+ * https://www.araa.asn.au/acra/acra2007/papers/paper84final.pdf
+ */
+ const unsigned int blackLevelHistIdx =
+ params_->blackLevel / (256 / SwIspStats::kYHistogramSize);
+ const unsigned int histogramSize =
+ SwIspStats::kYHistogramSize - blackLevelHistIdx;
+ const unsigned int yHistValsPerBin = histogramSize / kExposureBinsCount;
+ const unsigned int yHistValsPerBinMod =
+ histogramSize / (histogramSize % kExposureBinsCount + 1);
+ int exposureBins[kExposureBinsCount] = {};
+ unsigned int denom = 0;
+ unsigned int num = 0;
+
+ for (unsigned int i = 0; i < histogramSize; i++) {
+ unsigned int idx = (i - (i / yHistValsPerBinMod)) / yHistValsPerBin;
+ exposureBins[idx] += stats_->yHistogram[blackLevelHistIdx + i];
+ }
+
+ for (unsigned int i = 0; i < kExposureBinsCount; i++) {
+ LOG(IPASoft, Debug) << i << ": " << exposureBins[i];
+ denom += exposureBins[i];
+ num += exposureBins[i] * (i + 1);
+ }
+
+ float exposureMSV = static_cast<float>(num) / denom;
+
+ /* Sanity check */
+ if (!sensorControls.contains(V4L2_CID_EXPOSURE) ||
+ !sensorControls.contains(V4L2_CID_ANALOGUE_GAIN)) {
+ LOG(IPASoft, Error) << "Control(s) missing";
+ return;
+ }
+
+ exposure_ = sensorControls.get(V4L2_CID_EXPOSURE).get<int32_t>();
+ int32_t again = sensorControls.get(V4L2_CID_ANALOGUE_GAIN).get<int32_t>();
+ again_ = camHelper_ ? camHelper_->gain(again) : again;
+
+ updateExposure(exposureMSV);
+
+ ControlList ctrls(sensorInfoMap_);
+
+ ctrls.set(V4L2_CID_EXPOSURE, exposure_);
+ ctrls.set(V4L2_CID_ANALOGUE_GAIN,
+ static_cast<int32_t>(camHelper_ ? camHelper_->gainCode(again_) : again_));
+
+ ignoreUpdates_ = 2;
+
+ setSensorControls.emit(ctrls);
+
+ LOG(IPASoft, Debug) << "exposureMSV " << exposureMSV
+ << " exp " << exposure_ << " again " << again_
+ << " gain R/B " << params_->gainR << "/" << params_->gainB
+ << " black level " << params_->blackLevel;
+}
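A worked sketch of the Mean Sample Value computed above, using a hypothetical five-bin split of the luminance histogram; the AE loop then compares the result against kExposureOptimal +/- kExposureSatisfactory:

/* Hypothetical per-bin sample counts after black level removal. */
constexpr unsigned int bins[5] = { 10, 20, 40, 20, 10 };

unsigned int num = 0;
unsigned int denom = 0;
for (unsigned int i = 0; i < 5; i++) {
	num += bins[i] * (i + 1);
	denom += bins[i];
}

float exposureMSV = static_cast<float>(num) / denom;	/* 300 / 100 = 3.0 */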
+
+void IPASoftSimple::updateExposure(double exposureMSV)
+{
+ /*
+ * kExpDenominator of 10 gives ~10% increments/decrements;
+ * kExpDenominator of 5 gives ~20%.
+ */
+ static constexpr uint8_t kExpDenominator = 10;
+ static constexpr uint8_t kExpNumeratorUp = kExpDenominator + 1;
+ static constexpr uint8_t kExpNumeratorDown = kExpDenominator - 1;
+
+ double next;
+
+ if (exposureMSV < kExposureOptimal - kExposureSatisfactory) {
+ next = exposure_ * kExpNumeratorUp / kExpDenominator;
+ if (next - exposure_ < 1)
+ exposure_ += 1;
+ else
+ exposure_ = next;
+ if (exposure_ >= exposureMax_) {
+ next = again_ * kExpNumeratorUp / kExpDenominator;
+ if (next - again_ < againMinStep_)
+ again_ += againMinStep_;
+ else
+ again_ = next;
+ }
+ }
+
+ if (exposureMSV > kExposureOptimal + kExposureSatisfactory) {
+ if (exposure_ == exposureMax_ && again_ > againMin_) {
+ next = again_ * kExpNumeratorDown / kExpDenominator;
+ if (again_ - next < againMinStep_)
+ again_ -= againMinStep_;
+ else
+ again_ = next;
+ } else {
+ next = exposure_ * kExpNumeratorDown / kExpDenominator;
+ if (exposure_ - next < 1)
+ exposure_ -= 1;
+ else
+ exposure_ = next;
+ }
+ }
+
+ exposure_ = std::clamp(exposure_, exposureMin_, exposureMax_);
+ again_ = std::clamp(again_, againMin_, againMax_);
+}
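A short numeric illustration of the step size described above, with a hypothetical current exposure of 1000 lines:

int32_t exposure = 1000;
int32_t up = exposure * 11 / 10;	/* 1100 lines, ~+10% */
int32_t down = exposure * 9 / 10;	/*  900 lines, ~-10% */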
+
+} /* namespace ipa::soft */
+
+/*
+ * External IPA module interface
+ */
+extern "C" {
+const struct IPAModuleInfo ipaModuleInfo = {
+ IPA_MODULE_API_VERSION,
+ 0,
+ "simple",
+ "simple",
+};
+
+IPAInterface *ipaCreate()
+{
+ return new ipa::soft::IPASoftSimple();
+}
+
+} /* extern "C" */
+
+} /* namespace libcamera */
diff --git a/src/ipa/vimc/data/meson.build b/src/ipa/vimc/data/meson.build
index 42ec651c..628d6a29 100644
--- a/src/ipa/vimc/data/meson.build
+++ b/src/ipa/vimc/data/meson.build
@@ -5,4 +5,5 @@ conf_files = files([
])
install_data(conf_files,
- install_dir : ipa_data_dir / 'vimc')
+ install_dir : ipa_data_dir / 'vimc',
+ install_tag : 'runtime')
diff --git a/src/ipa/vimc/meson.build b/src/ipa/vimc/meson.build
index ecbeee13..264a2d9a 100644
--- a/src/ipa/vimc/meson.build
+++ b/src/ipa/vimc/meson.build
@@ -21,3 +21,5 @@ if ipa_sign_module
endif
subdir('data')
+
+ipa_names += ipa_name
diff --git a/src/ipa/vimc/vimc.cpp b/src/ipa/vimc/vimc.cpp
index 85afb279..ebd63fa6 100644
--- a/src/ipa/vimc/vimc.cpp
+++ b/src/ipa/vimc/vimc.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_vimc.cpp - Vimc Image Processing Algorithm module
+ * Vimc Image Processing Algorithm module
*/
#include <libcamera/ipa/vimc_ipa_interface.h>
@@ -31,7 +31,10 @@ public:
IPAVimc();
~IPAVimc();
- int init(const IPASettings &settings) override;
+ int init(const IPASettings &settings,
+ const ipa::vimc::IPAOperationCode code,
+ const Flags<ipa::vimc::TestFlag> inFlags,
+ Flags<ipa::vimc::TestFlag> *outFlags) override;
int start() override;
void stop() override;
@@ -66,7 +69,10 @@ IPAVimc::~IPAVimc()
::close(fd_);
}
-int IPAVimc::init(const IPASettings &settings)
+int IPAVimc::init(const IPASettings &settings,
+ const ipa::vimc::IPAOperationCode code,
+ const Flags<ipa::vimc::TestFlag> inFlags,
+ Flags<ipa::vimc::TestFlag> *outFlags)
{
trace(ipa::vimc::IPAOperationInit);
@@ -74,6 +80,15 @@ int IPAVimc::init(const IPASettings &settings)
<< "initializing vimc IPA with configuration file "
<< settings.configurationFile;
+ LOG(IPAVimc, Debug) << "Got opcode " << code;
+
+ LOG(IPAVimc, Debug)
+ << "Flag 2 was "
+ << (inFlags & ipa::vimc::TestFlag::Flag2 ? "" : "not ")
+ << "set";
+
+ *outFlags |= ipa::vimc::TestFlag::Flag1;
+
File conf(settings.configurationFile);
if (!conf.open(File::OpenModeFlag::ReadOnly)) {
LOG(IPAVimc, Error) << "Failed to open configuration file";
@@ -142,7 +157,8 @@ void IPAVimc::fillParamsBuffer([[maybe_unused]] uint32_t frame, uint32_t bufferI
return;
}
- paramsBufferReady.emit(bufferId);
+ Flags<ipa::vimc::TestFlag> flags;
+ paramsBufferReady.emit(bufferId, flags);
}
void IPAVimc::initTrace()
@@ -152,7 +168,7 @@ void IPAVimc::initTrace()
if (ret)
return;
- ret = ::open(ipa::vimc::VimcIPAFIFOPath.c_str(), O_WRONLY);
+ ret = ::open(ipa::vimc::VimcIPAFIFOPath.c_str(), O_WRONLY | O_CLOEXEC);
if (ret < 0) {
ret = errno;
LOG(IPAVimc, Error) << "Failed to open vimc IPA test FIFO: "
@@ -184,7 +200,7 @@ extern "C" {
const struct IPAModuleInfo ipaModuleInfo = {
IPA_MODULE_API_VERSION,
0,
- "PipelineHandlerVimc",
+ "vimc",
"vimc",
};
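A hedged caller-side sketch of the extended init() signature: test flags travel in through inFlags and back out through outFlags. The real invocation goes through the generated IPA proxy, and the configuration file name below is purely illustrative:

Flags<ipa::vimc::TestFlag> inFlags = ipa::vimc::TestFlag::Flag2;
Flags<ipa::vimc::TestFlag> outFlags;

/* "vimc.conf" and the sensor model are hypothetical values. */
int ret = ipa->init(IPASettings{ "vimc.conf", "vimc" },
		    ipa::vimc::IPAOperationInit, inFlags, &outFlags);
if (!ret && (outFlags & ipa::vimc::TestFlag::Flag1)) {
	/* The IPA acknowledged the call by setting Flag1. */
}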
diff --git a/src/libcamera/base/backtrace.cpp b/src/libcamera/base/backtrace.cpp
index 483492c3..0b04629c 100644
--- a/src/libcamera/base/backtrace.cpp
+++ b/src/libcamera/base/backtrace.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Ideas on Board Oy
*
- * backtrace.h - Call stack backtraces
+ * Call stack backtraces
*/
#include <libcamera/base/backtrace.h>
@@ -191,10 +191,22 @@ __attribute__((__noinline__))
bool Backtrace::unwindTrace()
{
#if HAVE_UNWIND
+/*
+ * unw_getcontext() for ARM32 is an inline assembly function using the stmia
+ * instruction to store SP and PC. clang-11 considers this deprecated and
+ * generates a warning.
+ */
+#ifdef __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Winline-asm"
+#endif
unw_context_t uc;
int ret = unw_getcontext(&uc);
if (ret)
return false;
+#ifdef __clang__
+#pragma clang diagnostic pop
+#endif
unw_cursor_t cursor;
ret = unw_init_local(&cursor, &uc);
diff --git a/src/libcamera/base/bound_method.cpp b/src/libcamera/base/bound_method.cpp
index 3ecec51c..322029a8 100644
--- a/src/libcamera/base/bound_method.cpp
+++ b/src/libcamera/base/bound_method.cpp
@@ -2,11 +2,12 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * bound_method.cpp - Method bind and invocation
+ * Method bind and invocation
*/
#include <libcamera/base/bound_method.h>
#include <libcamera/base/message.h>
+#include <libcamera/base/object.h>
#include <libcamera/base/semaphore.h>
#include <libcamera/base/thread.h>
diff --git a/src/libcamera/base/class.cpp b/src/libcamera/base/class.cpp
index 9c2d9f21..61998398 100644
--- a/src/libcamera/base/class.cpp
+++ b/src/libcamera/base/class.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * class.cpp - Utilities and helpers for classes
+ * Utilities and helpers for classes
*/
#include <libcamera/base/class.h>
diff --git a/src/libcamera/base/event_dispatcher.cpp b/src/libcamera/base/event_dispatcher.cpp
index 4be89e81..5f4a5cb4 100644
--- a/src/libcamera/base/event_dispatcher.cpp
+++ b/src/libcamera/base/event_dispatcher.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * event_dispatcher.cpp - Event dispatcher
+ * Event dispatcher
*/
#include <libcamera/base/event_dispatcher.h>
diff --git a/src/libcamera/base/event_dispatcher_poll.cpp b/src/libcamera/base/event_dispatcher_poll.cpp
index 7238a316..b737ca7a 100644
--- a/src/libcamera/base/event_dispatcher_poll.cpp
+++ b/src/libcamera/base/event_dispatcher_poll.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * event_dispatcher_poll.cpp - Poll-based event dispatcher
+ * Poll-based event dispatcher
*/
#include <libcamera/base/event_dispatcher_poll.h>
diff --git a/src/libcamera/base/event_notifier.cpp b/src/libcamera/base/event_notifier.cpp
index fd93c087..495c281d 100644
--- a/src/libcamera/base/event_notifier.cpp
+++ b/src/libcamera/base/event_notifier.cpp
@@ -2,12 +2,13 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * event_notifier.cpp - File descriptor event notifier
+ * File descriptor event notifier
*/
#include <libcamera/base/event_notifier.h>
#include <libcamera/base/event_dispatcher.h>
+#include <libcamera/base/log.h>
#include <libcamera/base/message.h>
#include <libcamera/base/thread.h>
@@ -20,6 +21,8 @@
namespace libcamera {
+LOG_DECLARE_CATEGORY(Event)
+
/**
* \class EventNotifier
* \brief Notify of activity on a file descriptor
@@ -104,6 +107,9 @@ EventNotifier::~EventNotifier()
*/
void EventNotifier::setEnabled(bool enable)
{
+ if (!assertThreadBound("EventNotifier can't be enabled from another thread"))
+ return;
+
if (enabled_ == enable)
return;
diff --git a/src/libcamera/base/file.cpp b/src/libcamera/base/file.cpp
index fb3e276d..2b83a517 100644
--- a/src/libcamera/base/file.cpp
+++ b/src/libcamera/base/file.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * file.cpp - File I/O operations
+ * File I/O operations
*/
#include <libcamera/base/file.h>
@@ -163,6 +163,9 @@ bool File::exists() const
* attempt to create the file with initial permissions set to 0666 (modified by
* the process' umask).
*
+ * The file is opened with the O_CLOEXEC flag, and will be closed automatically
+ * when a new binary is executed with one of the exec(3) functions.
+ *
* The error() status is updated.
*
* \return True on success, false otherwise
@@ -178,7 +181,7 @@ bool File::open(File::OpenMode mode)
if (mode & OpenModeFlag::WriteOnly)
flags |= O_CREAT;
- fd_ = UniqueFD(::open(name_.c_str(), flags, 0666));
+ fd_ = UniqueFD(::open(name_.c_str(), flags | O_CLOEXEC, 0666));
if (!fd_.isValid()) {
error_ = -errno;
return false;
diff --git a/src/libcamera/base/flags.cpp b/src/libcamera/base/flags.cpp
index 3e4320ac..9981f2ed 100644
--- a/src/libcamera/base/flags.cpp
+++ b/src/libcamera/base/flags.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * flags.cpp - Type-safe enum-based bitfields
+ * Type-safe enum-based bitfields
*/
#include <libcamera/base/flags.h>
diff --git a/src/libcamera/base/log.cpp b/src/libcamera/base/log.cpp
index 5c359a22..3a656b8f 100644
--- a/src/libcamera/base/log.cpp
+++ b/src/libcamera/base/log.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * log.cpp - Logging infrastructure
+ * Logging infrastructure
*/
#include <libcamera/base/log.h>
@@ -21,6 +21,7 @@
#include <libcamera/logging.h>
#include <libcamera/base/backtrace.h>
+#include <libcamera/base/mutex.h>
#include <libcamera/base/thread.h>
#include <libcamera/base/utils.h>
@@ -314,10 +315,11 @@ private:
friend LogCategory;
void registerCategory(LogCategory *category);
+ LogCategory *findCategory(const char *name) const;
static bool destroyed_;
- std::unordered_set<LogCategory *> categories_;
+ std::vector<LogCategory *> categories_;
std::list<std::pair<std::string, LogSeverity>> levels_;
std::shared_ptr<LogOutput> output_;
@@ -568,7 +570,7 @@ void Logger::logSetLevel(const char *category, const char *level)
return;
for (LogCategory *c : categories_) {
- if (!strcmp(c->name(), category)) {
+ if (c->name() == category) {
c->setSeverity(severity);
break;
}
@@ -707,12 +709,12 @@ LogSeverity Logger::parseLogLevel(const std::string &level)
* \brief Register a log category with the logger
* \param[in] category The log category
*
- * Log categories must have unique names. If a category with the same name
- * already exists this function performs no operation.
+ * Log categories must have unique names. It is invalid to call this function
+ * if a log category with the same name already exists.
*/
void Logger::registerCategory(LogCategory *category)
{
- categories_.insert(category);
+ categories_.push_back(category);
const std::string &name = category->name();
for (const std::pair<std::string, LogSeverity> &level : levels_) {
@@ -737,6 +739,22 @@ void Logger::registerCategory(LogCategory *category)
}
/**
+ * \brief Find an existing log category with the given name
+ * \param[in] name Name of the log category
+ * \return The pointer to the found log category or nullptr if not found
+ */
+LogCategory *Logger::findCategory(const char *name) const
+{
+ if (auto it = std::find_if(categories_.begin(), categories_.end(),
+ [name](auto c) { return c->name() == name; });
+ it != categories_.end()) {
+ return *it;
+ }
+
+ return nullptr;
+}
+
+/**
* \enum LogSeverity
* Log message severity
* \var LogDebug
@@ -761,13 +779,35 @@ void Logger::registerCategory(LogCategory *category)
*/
/**
+ * \brief Create a new LogCategory or return an existing one
+ * \param[in] name Name of the log category
+ *
+ * Create and return a new LogCategory with the given name if such a category
+ * does not yet exist, or return the existing one.
+ *
+ * \return The pointer to the LogCategory
+ */
+LogCategory *LogCategory::create(const char *name)
+{
+ static Mutex mutex_;
+ MutexLocker locker(mutex_);
+ LogCategory *category = Logger::instance()->findCategory(name);
+
+ if (!category) {
+ category = new LogCategory(name);
+ Logger::instance()->registerCategory(category);
+ }
+
+ return category;
+}
+
+/**
* \brief Construct a log category
* \param[in] name The category name
*/
LogCategory::LogCategory(const char *name)
: name_(name), severity_(LogSeverity::LogInfo)
{
- Logger::instance()->registerCategory(this);
}
/**
@@ -804,7 +844,7 @@ void LogCategory::setSeverity(LogSeverity severity)
*/
const LogCategory &LogCategory::defaultCategory()
{
- static const LogCategory *category = new LogCategory("default");
+ static const LogCategory *category = LogCategory::create("default");
return *category;
}
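With the LogCategory constructor no longer self-registering, dynamic users are expected to obtain categories through LogCategory::create(), which finds or registers the instance under a mutex. A minimal sketch of the resulting idempotence:

LogCategory *a = LogCategory::create("MyComponent");
LogCategory *b = LogCategory::create("MyComponent");
/* Both calls return the same registered instance. */
ASSERT(a == b);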
diff --git a/src/libcamera/base/meson.build b/src/libcamera/base/meson.build
index 7030ad1f..7a7fd7e4 100644
--- a/src/libcamera/base/meson.build
+++ b/src/libcamera/base/meson.build
@@ -22,8 +22,8 @@ libcamera_base_sources = files([
'utils.cpp',
])
-libdw = cc.find_library('libdw', required : false)
-libunwind = cc.find_library('libunwind', required : false)
+libdw = dependency('libdw', required : false)
+libunwind = dependency('libunwind', required : false)
if cc.has_header_symbol('execinfo.h', 'backtrace')
config_h.set('HAVE_BACKTRACE', 1)
@@ -38,9 +38,9 @@ if libunwind.found()
endif
libcamera_base_deps = [
- dependency('threads'),
libatomic,
libdw,
+ libthreads,
libunwind,
]
@@ -51,6 +51,7 @@ libcamera_base_args = [ '-DLIBCAMERA_BASE_PRIVATE' ]
libcamera_base_lib = shared_library('libcamera-base',
[libcamera_base_sources, libcamera_base_headers],
version : libcamera_version,
+ soversion : libcamera_soversion,
name_prefix : '',
install : true,
cpp_args : libcamera_base_args,
diff --git a/src/libcamera/base/message.cpp b/src/libcamera/base/message.cpp
index 2da2a7ed..098faac6 100644
--- a/src/libcamera/base/message.cpp
+++ b/src/libcamera/base/message.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * message.cpp - Message queue support
+ * Message queue support
*/
#include <libcamera/base/message.h>
diff --git a/src/libcamera/base/mutex.cpp b/src/libcamera/base/mutex.cpp
index e34e8618..2a4542c4 100644
--- a/src/libcamera/base/mutex.cpp
+++ b/src/libcamera/base/mutex.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * mutex.cpp - Mutex classes with clang thread safety annotation
+ * Mutex classes with clang thread safety annotation
*/
#include <libcamera/base/mutex.h>
diff --git a/src/libcamera/base/object.cpp b/src/libcamera/base/object.cpp
index 92cecd22..745d2565 100644
--- a/src/libcamera/base/object.cpp
+++ b/src/libcamera/base/object.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * object.cpp - Base object
+ * Base object
*/
#include <libcamera/base/object.h>
@@ -40,8 +40,9 @@ LOG_DEFINE_CATEGORY(Object)
* Object class.
*
* Deleting an object from a thread other than the one the object is bound to is
- * unsafe, unless the caller ensures that the object isn't processing any
- * message concurrently.
+ * unsafe, unless the caller ensures that the object's thread is stopped and no
+ * parent or child of the object gets deleted concurrently. See
+ * Object::~Object() for more information.
*
* Object slots connected to signals will also run in the context of the
* object's thread, regardless of whether the signal is emitted in the same or
@@ -84,9 +85,20 @@ Object::Object(Object *parent)
* Object instances shall be destroyed from the thread they are bound to,
* otherwise undefined behaviour may occur. If deletion of an Object needs to
* be scheduled from a different thread, deleteLater() shall be used.
+ *
+ * As an exception to this rule, Object instances may be deleted from a
+ * different thread if the thread the instance is bound to is stopped through
+ * the whole duration of the object's destruction, *and* the parent and children
+ * of the object do not get deleted concurrently. The caller is responsible for
+ * fulfilling those requirements.
+ *
+ * In all cases Object instances shall be deleted before the Thread they are
+ * bound to.
*/
Object::~Object()
{
+ ASSERT(Thread::current() == thread_ || !thread_->isRunning());
+
/*
* Move signals to a private list to avoid concurrent iteration and
* deletion of items from Signal::disconnect().
@@ -116,8 +128,9 @@ Object::~Object()
* event loop that the object belongs to. This ensures the object is destroyed
* from the right context, as required by the libcamera threading model.
*
- * If this function is called before the thread's event loop is started, the
- * object will be deleted when the event loop starts.
+ * If this function is called before the thread's event loop is started or after
+ * it has stopped, the object will be deleted when the event loop (re)starts. If
+ * this never occurs, the object will be leaked.
*
* Deferred deletion can be used to control the destruction context with shared
* pointers. An object managed with shared pointers is deleted when the last
@@ -213,6 +226,35 @@ void Object::message(Message *msg)
}
/**
+ * \fn Object::assertThreadBound()
+ * \brief Check if the caller complies with thread-bound constraints
+ * \param[in] message The message to be printed on error
+ *
+ * This function verifies the calling constraints required by the \threadbound
+ * definition. It shall be called at the beginning of member functions of an
+ * Object subclass that are explicitly marked as thread-bound in their
+ * documentation.
+ *
+ * If the thread-bound constraints are not met, the function prints \a message
+ * as an error message. For debug builds, it additionally causes an assertion
+ * error.
+ *
+ * \todo Verify the thread-bound requirements for functions marked as
+ * thread-bound at the class level.
+ *
+ * \return True if the call is thread-bound compliant, false otherwise
+ */
+bool Object::assertThreadBound(const char *message)
+{
+ if (Thread::current() == thread_)
+ return true;
+
+ LOG(Object, Error) << message;
+ ASSERT(false);
+ return false;
+}
+
+/**
* \fn R Object::invokeMethod()
* \brief Invoke a method asynchronously on an Object instance
* \param[in] func The object method to invoke
@@ -259,11 +301,12 @@ void Object::message(Message *msg)
* Moving an object that has a parent is not allowed, and causes undefined
* behaviour.
*
- * \context This function is thread-bound.
+ * \context This function is \threadbound.
*/
void Object::moveToThread(Thread *thread)
{
- ASSERT(Thread::current() == thread_);
+ if (!assertThreadBound("Object can't be moved from another thread"))
+ return;
if (thread_ == thread)
return;
diff --git a/src/libcamera/base/semaphore.cpp b/src/libcamera/base/semaphore.cpp
index 4fe30293..862f3b31 100644
--- a/src/libcamera/base/semaphore.cpp
+++ b/src/libcamera/base/semaphore.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * semaphore.cpp - General-purpose counting semaphore
+ * General-purpose counting semaphore
*/
#include <libcamera/base/semaphore.h>
@@ -56,7 +56,9 @@ unsigned int Semaphore::available()
void Semaphore::acquire(unsigned int n)
{
MutexLocker locker(mutex_);
- cv_.wait(locker, [&] { return available_ >= n; });
+ cv_.wait(locker, [&]() LIBCAMERA_TSA_REQUIRES(mutex_) {
+ return available_ >= n;
+ });
available_ -= n;
}
diff --git a/src/libcamera/base/shared_fd.cpp b/src/libcamera/base/shared_fd.cpp
index c711cf57..7afc8ca5 100644
--- a/src/libcamera/base/shared_fd.cpp
+++ b/src/libcamera/base/shared_fd.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * shared_fd.cpp - File descriptor wrapper with shared ownership
+ * File descriptor wrapper with shared ownership
*/
#include <libcamera/base/shared_fd.h>
diff --git a/src/libcamera/base/signal.cpp b/src/libcamera/base/signal.cpp
index a46386a0..b782e050 100644
--- a/src/libcamera/base/signal.cpp
+++ b/src/libcamera/base/signal.cpp
@@ -2,12 +2,13 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * signal.cpp - Signal & slot implementation
+ * Signal & slot implementation
*/
#include <libcamera/base/signal.h>
#include <libcamera/base/mutex.h>
+#include <libcamera/base/object.h>
/**
* \file base/signal.h
@@ -74,7 +75,7 @@ SignalBase::SlotList SignalBase::slots()
*
* Signals and slots are a language construct aimed at communication between
* objects through the observer pattern without the need for boilerplate code.
- * See http://doc.qt.io/qt-5/signalsandslots.html for more information.
+ * See http://doc.qt.io/qt-6/signalsandslots.html for more information.
*
* Signals model events that can be observed from objects unrelated to the event
* source. Slots are functions that are called in response to a signal. Signals
diff --git a/src/libcamera/base/thread.cpp b/src/libcamera/base/thread.cpp
index 6bda9d14..72733431 100644
--- a/src/libcamera/base/thread.cpp
+++ b/src/libcamera/base/thread.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * thread.cpp - Thread support
+ * Thread support
*/
#include <libcamera/base/thread.h>
@@ -18,6 +18,7 @@
#include <libcamera/base/log.h>
#include <libcamera/base/message.h>
#include <libcamera/base/mutex.h>
+#include <libcamera/base/object.h>
/**
* \page thread Thread Support
@@ -151,7 +152,7 @@ private:
friend class ThreadMain;
Thread *thread_;
- bool running_;
+ bool running_ LIBCAMERA_TSA_GUARDED_BY(mutex_);
pid_t tid_;
Mutex mutex_;
@@ -370,6 +371,12 @@ void Thread::run()
void Thread::finishThread()
{
+ /*
+ * Objects may have been scheduled for deletion right before the thread
+ * exited. Ensure they get deleted now, before the thread stops.
+ */
+ dispatchMessages(Message::Type::DeferredDelete);
+
data_->mutex_.lock();
data_->running_ = false;
data_->mutex_.unlock();
@@ -422,11 +429,15 @@ bool Thread::wait(utils::duration duration)
{
MutexLocker locker(data_->mutex_);
+ auto isRunning = ([&]() LIBCAMERA_TSA_REQUIRES(data_->mutex_) {
+ return !data_->running_;
+ });
+
if (duration == utils::duration::max())
- data_->cv_.wait(locker, [&]() { return !data_->running_; });
+ data_->cv_.wait(locker, isRunning);
else
hasFinished = data_->cv_.wait_for(locker, duration,
- [&]() { return !data_->running_; });
+ isRunning);
}
if (thread_.joinable())
diff --git a/src/libcamera/base/timer.cpp b/src/libcamera/base/timer.cpp
index 74b060af..7b0f3725 100644
--- a/src/libcamera/base/timer.cpp
+++ b/src/libcamera/base/timer.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * timer.cpp - Generic timer
+ * Generic timer
*/
#include <libcamera/base/timer.h>
@@ -85,10 +85,8 @@ void Timer::start(std::chrono::milliseconds duration)
*/
void Timer::start(std::chrono::steady_clock::time_point deadline)
{
- if (Thread::current() != thread()) {
- LOG(Timer, Error) << "Timer " << this << " << can't be started from another thread";
+ if (!assertThreadBound("Timer can't be started from another thread"))
return;
- }
deadline_ = deadline;
@@ -114,13 +112,11 @@ void Timer::start(std::chrono::steady_clock::time_point deadline)
*/
void Timer::stop()
{
- if (!isRunning())
+ if (!assertThreadBound("Timer can't be stopped from another thread"))
return;
- if (Thread::current() != thread()) {
- LOG(Timer, Error) << "Timer " << this << " can't be stopped from another thread";
+ if (!isRunning())
return;
- }
unregisterTimer();
}
diff --git a/src/libcamera/base/unique_fd.cpp b/src/libcamera/base/unique_fd.cpp
index 83d6919c..d0649e4d 100644
--- a/src/libcamera/base/unique_fd.cpp
+++ b/src/libcamera/base/unique_fd.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * unique_fd.cpp - File descriptor wrapper that owns a file descriptor
+ * File descriptor wrapper that owns a file descriptor
*/
#include <libcamera/base/unique_fd.h>
diff --git a/src/libcamera/base/utils.cpp b/src/libcamera/base/utils.cpp
index 6a307940..ccb31063 100644
--- a/src/libcamera/base/utils.cpp
+++ b/src/libcamera/base/utils.cpp
@@ -2,12 +2,13 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * utils.cpp - Miscellaneous utility functions
+ * Miscellaneous utility functions
*/
#include <libcamera/base/utils.h>
#include <iomanip>
+#include <locale.h>
#include <sstream>
#include <stdlib.h>
#include <string.h>
@@ -463,6 +464,73 @@ std::string toAscii(const std::string &str)
* \a b
*/
+#if HAVE_LOCALE_T
+
+namespace {
+
+/*
+ * RAII wrapper around locale_t instances, to support global locale instances
+ * without leaking memory.
+ */
+class Locale
+{
+public:
+ Locale(const char *locale)
+ {
+ locale_ = newlocale(LC_ALL_MASK, locale, static_cast<locale_t>(0));
+ }
+
+ ~Locale()
+ {
+ freelocale(locale_);
+ }
+
+ locale_t locale() { return locale_; }
+
+private:
+ locale_t locale_;
+};
+
+Locale cLocale("C");
+
+} /* namespace */
+
+#endif /* HAVE_LOCALE_T */
+
+/**
+ * \brief Convert a string to a double independently of the current locale
+ * \param[in] nptr The string to convert
+ * \param[out] endptr Pointer to trailing portion of the string after conversion
+ *
+ * This function is a locale-independent version of the std::strtod() function.
+ * It behaves as the standard function, but uses the "C" locale instead of the
+ * current locale.
+ *
+ * \return The converted value, if any, or 0.0 if the conversion failed.
+ */
+double strtod(const char *__restrict nptr, char **__restrict endptr)
+{
+#if HAVE_LOCALE_T
+ return strtod_l(nptr, endptr, cLocale.locale());
+#else
+ /*
+ * If the libc implementation doesn't provide locale object support,
+ * assume that strtod() is locale-independent.
+ */
+ return ::strtod(nptr, endptr);
+#endif
+}
+
+/**
+ * \fn to_underlying(Enum e)
+ * \brief Convert an enumeration to its underlying type
+ * \param[in] e Enumeration value to convert
+ *
+ * This function is equivalent to the C++23 std::to_underlying().
+ *
+ * \return The value of e converted to its underlying type
+ */
+
} /* namespace utils */
#ifndef __DOXYGEN__
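A small usage sketch of the locale-independent helper above, assuming the declaration is exposed through libcamera/base/utils.h: the string "1.5" parses to 1.5 even when the process locale uses ',' as the decimal separator.

#include <libcamera/base/utils.h>

double parseValue(const char *str)
{
	char *end = nullptr;
	double value = libcamera::utils::strtod(str, &end);
	if (end == str)
		return 0.0;	/* no conversion performed */

	return value;
}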
diff --git a/src/libcamera/bayer_format.cpp b/src/libcamera/bayer_format.cpp
index 4882707e..014f716d 100644
--- a/src/libcamera/bayer_format.cpp
+++ b/src/libcamera/bayer_format.cpp
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2020, Raspberry Pi Ltd
*
- * bayer_format.cpp - Class to represent Bayer formats
+ * Class to represent Bayer formats
*/
#include "libcamera/internal/bayer_format.h"
@@ -61,6 +61,10 @@ namespace libcamera {
* \brief Format uses MIPI CSI-2 style packing
* \var BayerFormat::Packing::IPU3
* \brief Format uses IPU3 style packing
+ * \var BayerFormat::Packing::PISP1
+ * \brief Format uses PISP mode 1 compression
+ * \var BayerFormat::Packing::PISP2
+ * \brief Format uses PISP mode 2 compression
*/
namespace {
@@ -140,6 +144,22 @@ const std::map<BayerFormat, Formats, BayerFormatComparator> bayerToFormat{
{ formats::SGRBG12_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_SGRBG12P) } },
{ { BayerFormat::RGGB, 12, BayerFormat::Packing::CSI2 },
{ formats::SRGGB12_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_SRGGB12P) } },
+ { { BayerFormat::BGGR, 14, BayerFormat::Packing::None },
+ { formats::SBGGR14, V4L2PixelFormat(V4L2_PIX_FMT_SBGGR14) } },
+ { { BayerFormat::GBRG, 14, BayerFormat::Packing::None },
+ { formats::SGBRG14, V4L2PixelFormat(V4L2_PIX_FMT_SGBRG14) } },
+ { { BayerFormat::GRBG, 14, BayerFormat::Packing::None },
+ { formats::SGRBG14, V4L2PixelFormat(V4L2_PIX_FMT_SGRBG14) } },
+ { { BayerFormat::RGGB, 14, BayerFormat::Packing::None },
+ { formats::SRGGB14, V4L2PixelFormat(V4L2_PIX_FMT_SRGGB14) } },
+ { { BayerFormat::BGGR, 14, BayerFormat::Packing::CSI2 },
+ { formats::SBGGR14_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_SBGGR14P) } },
+ { { BayerFormat::GBRG, 14, BayerFormat::Packing::CSI2 },
+ { formats::SGBRG14_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_SGBRG14P) } },
+ { { BayerFormat::GRBG, 14, BayerFormat::Packing::CSI2 },
+ { formats::SGRBG14_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_SGRBG14P) } },
+ { { BayerFormat::RGGB, 14, BayerFormat::Packing::CSI2 },
+ { formats::SRGGB14_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_SRGGB14P) } },
{ { BayerFormat::BGGR, 16, BayerFormat::Packing::None },
{ formats::SBGGR16, V4L2PixelFormat(V4L2_PIX_FMT_SBGGR16) } },
{ { BayerFormat::GBRG, 16, BayerFormat::Packing::None },
@@ -148,12 +168,26 @@ const std::map<BayerFormat, Formats, BayerFormatComparator> bayerToFormat{
{ formats::SGRBG16, V4L2PixelFormat(V4L2_PIX_FMT_SGRBG16) } },
{ { BayerFormat::RGGB, 16, BayerFormat::Packing::None },
{ formats::SRGGB16, V4L2PixelFormat(V4L2_PIX_FMT_SRGGB16) } },
+ { { BayerFormat::BGGR, 16, BayerFormat::Packing::PISP1 },
+ { formats::BGGR_PISP_COMP1, V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_BGGR) } },
+ { { BayerFormat::GBRG, 16, BayerFormat::Packing::PISP1 },
+ { formats::GBRG_PISP_COMP1, V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_GBRG) } },
+ { { BayerFormat::GRBG, 16, BayerFormat::Packing::PISP1 },
+ { formats::GRBG_PISP_COMP1, V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_GRBG) } },
+ { { BayerFormat::RGGB, 16, BayerFormat::Packing::PISP1 },
+ { formats::RGGB_PISP_COMP1, V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_RGGB) } },
{ { BayerFormat::MONO, 8, BayerFormat::Packing::None },
{ formats::R8, V4L2PixelFormat(V4L2_PIX_FMT_GREY) } },
{ { BayerFormat::MONO, 10, BayerFormat::Packing::None },
{ formats::R10, V4L2PixelFormat(V4L2_PIX_FMT_Y10) } },
{ { BayerFormat::MONO, 10, BayerFormat::Packing::CSI2 },
{ formats::R10_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_Y10P) } },
+ { { BayerFormat::MONO, 12, BayerFormat::Packing::None },
+ { formats::R12, V4L2PixelFormat(V4L2_PIX_FMT_Y12) } },
+ { { BayerFormat::MONO, 16, BayerFormat::Packing::None },
+ { formats::R16, V4L2PixelFormat(V4L2_PIX_FMT_Y16) } },
+ { { BayerFormat::MONO, 16, BayerFormat::Packing::PISP1 },
+ { formats::MONO_PISP_COMP1, V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_MONO) } },
};
const std::unordered_map<unsigned int, BayerFormat> mbusCodeToBayer{
@@ -191,6 +225,8 @@ const std::unordered_map<unsigned int, BayerFormat> mbusCodeToBayer{
{ MEDIA_BUS_FMT_SRGGB16_1X16, { BayerFormat::RGGB, 16, BayerFormat::Packing::None } },
{ MEDIA_BUS_FMT_Y8_1X8, { BayerFormat::MONO, 8, BayerFormat::Packing::None } },
{ MEDIA_BUS_FMT_Y10_1X10, { BayerFormat::MONO, 10, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_Y12_1X12, { BayerFormat::MONO, 12, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_Y16_1X16, { BayerFormat::MONO, 16, BayerFormat::Packing::None } },
};
} /* namespace */
@@ -281,6 +317,10 @@ std::ostream &operator<<(std::ostream &out, const BayerFormat &f)
out << "-CSI2P";
else if (f.packing == BayerFormat::Packing::IPU3)
out << "-IPU3P";
+ else if (f.packing == BayerFormat::Packing::PISP1)
+ out << "-PISP1";
+ else if (f.packing == BayerFormat::Packing::PISP2)
+ out << "-PISP2";
return out;
}
@@ -355,11 +395,14 @@ BayerFormat BayerFormat::fromPixelFormat(PixelFormat format)
* \brief Apply a transform to this BayerFormat
* \param[in] t The transform to apply
*
- * Appplying a transform to an image stored in a Bayer format affects the Bayer
- * order. For example, performing a horizontal flip on the Bayer pattern
- * RGGB causes the RG rows of pixels to become GR, and the GB rows to become BG.
- * The transformed image would have a GRBG order. The bit depth and modifiers
- * are not affected.
+ * Applying a transform to an image stored in a Bayer format affects the Bayer
+ * order. For example, performing a horizontal flip on the Bayer pattern RGGB
+ * causes the RG rows of pixels to become GR, and the GB rows to become BG. The
+ * transformed image would have a GRBG order. Performing a vertical flip on the
+ * Bayer pattern RGGB causes the GB rows to come before the RG ones and the
+ * transformed image would have GBRG order. Applying both vertical and
+ * horizontal flips on the Bayer pattern RGGB results in a transformed image with
+ * BGGR order. The bit depth and modifiers are not affected.
*
* Horizontal and vertical flips are applied before transpose.
*
@@ -374,8 +417,11 @@ BayerFormat BayerFormat::transform(Transform t) const
/*
* Observe that flipping bit 0 of the Order enum performs a horizontal
- * mirror on the Bayer pattern (e.g. RGGB goes to GRBG). Similarly,
- * flipping bit 1 performs a vertical mirror operation on it. Hence:
+ * mirror on the Bayer pattern (e.g. RG/GB goes to GR/BG). Similarly,
+ * flipping bit 1 performs a vertical mirror operation on it (e.g. RG/GB
+ * goes to GB/RG). Applying both vertical and horizontal flips
+ * combines vertical and horizontal mirroring on the Bayer pattern
+ * (e.g. RG/GB goes to BG/GR). Hence:
*/
if (!!(t & Transform::HFlip))
result.order = static_cast<Order>(result.order ^ 1);
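The bit-flip trick in the comment above can be spelled out with the Order values, assuming the usual layout BGGR=0, GBRG=1, GRBG=2, RGGB=3 so that bit 0 is the horizontal and bit 1 the vertical component:

    RGGB (0b11) ^ 0b01 = 0b10 = GRBG   (horizontal flip)
    RGGB (0b11) ^ 0b10 = 0b01 = GBRG   (vertical flip)
    RGGB (0b11) ^ 0b11 = 0b00 = BGGR   (both flips)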
diff --git a/src/libcamera/byte_stream_buffer.cpp b/src/libcamera/byte_stream_buffer.cpp
index 881cd371..fba9a6f3 100644
--- a/src/libcamera/byte_stream_buffer.cpp
+++ b/src/libcamera/byte_stream_buffer.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * byte_stream_buffer.cpp - Byte stream buffer
+ * Byte stream buffer
*/
#include "libcamera/internal/byte_stream_buffer.h"
diff --git a/src/libcamera/camera.cpp b/src/libcamera/camera.cpp
index 713543fd..67f34901 100644
--- a/src/libcamera/camera.cpp
+++ b/src/libcamera/camera.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * camera.cpp - Camera device
+ * Camera device
*/
#include <libcamera/camera.h>
@@ -97,6 +97,16 @@
* implemented in the above order at the hardware level. The libcamera pipeline
* handlers translate the pipeline model to the real hardware configuration.
*
+ * \subsection camera-sensor-model Camera Sensor Model
+ *
+ * By default, libcamera configures the camera sensor automatically based on the
+ * configuration of the streams. Applications may instead specify a manual
+ * configuration for the camera sensor. This allows precise control of the frame
+ * geometry and frame rate delivered by the sensor.
+ *
+ * More details about the camera sensor model implemented by libcamera are
+ * available in the libcamera camera-sensor-model documentation page.
+ *
* \subsection digital-zoom Digital Zoom
*
* Digital zoom is implemented as a combination of the cropping and scaling
@@ -112,6 +122,127 @@ namespace libcamera {
LOG_DECLARE_CATEGORY(Camera)
/**
+ * \class SensorConfiguration
+ * \brief Camera sensor configuration
+ *
+ * The SensorConfiguration class collects parameters to control the operations
+ * of the camera sensor, according to the abstract camera sensor model
+ * implemented by libcamera.
+ *
+ * \todo Applications shall fully populate all fields of the
+ * CameraConfiguration::sensorConfig class members before validating the
+ * CameraConfiguration. If the SensorConfiguration is not fully populated, or if
+ * any of its parameters cannot be applied to the sensor in use, the
+ * CameraConfiguration validation process will fail and return
+ * CameraConfiguration::Status::Invalid.
+ *
+ * Applications that populate the SensorConfiguration class members are
+ * expected to be highly-specialized applications that know what sensor
+ * they are operating with and what parameters are valid for the sensor in use.
+ *
+ * A detailed description of the abstract camera sensor model implemented by
+ * libcamera and the description of its configuration parameters is available
+ * in the libcamera documentation camera-sensor-model file.
+ */
+
+/**
+ * \var SensorConfiguration::bitDepth
+ * \brief The sensor image format bit depth
+ *
+ * The number of bits (resolution) used to represent a pixel sample.
+ */
+
+/**
+ * \var SensorConfiguration::analogCrop
+ * \brief The analog crop rectangle
+ *
+ * The selected portion of the active pixel array used to produce the image
+ * frame.
+ */
+
+/**
+ * \var SensorConfiguration::binning
+ * \brief Sensor binning configuration
+ *
+ * Refer to the camera-sensor-model documentation for an accurate description
+ * of the binning operations. Disabled by default.
+ */
+
+/**
+ * \var SensorConfiguration::binX
+ * \brief Horizontal binning factor
+ *
+ * The horizontal binning factor. Defaults to 1.
+ */
+
+/**
+ * \var SensorConfiguration::binY
+ * \brief Vertical binning factor
+ *
+ * The vertical binning factor. Defaults to 1.
+ */
+
+/**
+ * \var SensorConfiguration::skipping
+ * \brief The sensor skipping configuration
+ *
+ * Refer to the camera-sensor-model documentation for an accurate description
+ * of the skipping operations.
+ *
+ * If no skipping is performed, all the structure fields should be
+ * set to 1. Disabled by default.
+ */
+
+/**
+ * \var SensorConfiguration::xOddInc
+ * \brief Horizontal increment for odd rows. Defaults to 1.
+ */
+
+/**
+ * \var SensorConfiguration::xEvenInc
+ * \brief Horizontal increment for even rows. Defaults to 1.
+ */
+
+/**
+ * \var SensorConfiguration::yOddInc
+ * \brief Vertical increment for odd columns. Defaults to 1.
+ */
+
+/**
+ * \var SensorConfiguration::yEvenInc
+ * \brief Vertical increment for even columns. Defaults to 1.
+ */
+
+/**
+ * \var SensorConfiguration::outputSize
+ * \brief The frame output (visible) size
+ *
+ * The size of the data frame as received by the host processor.
+ */
+
+/**
+ * \brief Check if the sensor configuration is valid
+ *
+ * A sensor configuration is valid if it's fully populated.
+ *
+ * \todo For now allow applications to populate the bitDepth and the outputSize
+ * only, as skipping and binning factors are initialized to 1 and the analog
+ * crop is ignored.
+ *
+ * \return True if the sensor configuration is valid, false otherwise
+ */
+bool SensorConfiguration::isValid() const
+{
+ if (bitDepth && binning.binX && binning.binY &&
+ skipping.xOddInc && skipping.yOddInc &&
+ skipping.xEvenInc && skipping.yEvenInc &&
+ !outputSize.isNull())
+ return true;
+
+ return false;
+}
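Given the defaults documented above (binning and skipping factors initialized to 1, analog crop ignored for now), a minimal sketch of a configuration that passes isValid():

SensorConfiguration sensorConfig;
sensorConfig.bitDepth = 10;
sensorConfig.outputSize = Size(1920, 1080);

/* Binning and skipping keep their default value of 1. */
bool valid = sensorConfig.isValid();	/* true */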
+
+/**
* \class CameraConfiguration
* \brief Hold configuration for streams of the camera
@@ -160,7 +291,7 @@ LOG_DECLARE_CATEGORY(Camera)
* \brief Create an empty camera configuration
*/
CameraConfiguration::CameraConfiguration()
- : transform(Transform::Identity), config_({})
+ : orientation(Orientation::Rotate0), config_({})
{
}
@@ -184,12 +315,12 @@ void CameraConfiguration::addConfiguration(const StreamConfiguration &cfg)
* This function adjusts the camera configuration to the closest valid
* configuration and returns the validation status.
*
- * \todo: Define exactly when to return each status code. Should stream
+ * \todo Define exactly when to return each status code. Should stream
* parameters set to 0 by the caller be adjusted without returning Adjusted ?
* This would potentially be useful for applications but would get in the way
* in Camera::configure(). Do we need an extra status code to signal this ?
*
- * \todo: Handle validation of buffers count when refactoring the buffers API.
+ * \todo Handle validation of buffers count when refactoring the buffers API.
*
* \return A CameraConfiguration::Status value that describes the validation
* status.
@@ -317,17 +448,6 @@ std::size_t CameraConfiguration::size() const
return config_.size();
}
-namespace {
-
-bool isRaw(const PixelFormat &pixFmt)
-{
- const PixelFormatInfo &info = PixelFormatInfo::info(pixFmt);
- return info.isValid() &&
- info.colourEncoding == PixelFormatInfo::ColourEncodingRAW;
-}
-
-} /* namespace */
-
/**
* \enum CameraConfiguration::ColorSpaceFlag
* \brief Specify the behaviour of validateColorSpaces
@@ -358,8 +478,8 @@ bool isRaw(const PixelFormat &pixFmt)
* \return A CameraConfiguration::Status value that describes the validation
* status.
* \retval CameraConfigutation::Adjusted The configuration has been adjusted
- * and is now valid. The color space of some or all of the streams may bave
- * benn changed. The caller shall check the color spaces carefully.
+ * and is now valid. The color space of some or all of the streams may have
+ * been changed. The caller shall check the color spaces carefully.
* \retval CameraConfiguration::Valid The configuration was already valid and
* hasn't been adjusted.
*/
@@ -368,29 +488,33 @@ CameraConfiguration::Status CameraConfiguration::validateColorSpaces(ColorSpaceF
Status status = Valid;
/*
- * Set all raw streams to the Raw color space, and make a note of the largest
- * non-raw stream with a defined color space (if there is one).
+ * Set all raw streams to the Raw color space, and make a note of the
+ * largest non-raw stream with a defined color space (if there is one).
*/
- int index = -1;
- for (auto [i, cfg] : utils::enumerate(config_)) {
- if (isRaw(cfg.pixelFormat)) {
- if (cfg.colorSpace != ColorSpace::Raw) {
- cfg.colorSpace = ColorSpace::Raw;
- status = Adjusted;
- }
- } else if (cfg.colorSpace && (index == -1 || cfg.size > config_[i].size)) {
- index = i;
+ std::optional<ColorSpace> colorSpace;
+ Size size;
+
+ for (StreamConfiguration &cfg : config_) {
+ if (!cfg.colorSpace)
+ continue;
+
+ if (cfg.colorSpace->adjust(cfg.pixelFormat))
+ status = Adjusted;
+
+ if (cfg.colorSpace != ColorSpace::Raw && cfg.size > size) {
+ colorSpace = cfg.colorSpace;
+ size = cfg.size;
}
}
- if (index < 0 || !(flags & ColorSpaceFlag::StreamsShareColorSpace))
+ if (!colorSpace || !(flags & ColorSpaceFlag::StreamsShareColorSpace))
return status;
/* Make all output color spaces the same, if requested. */
for (auto &cfg : config_) {
- if (!isRaw(cfg.pixelFormat) &&
- cfg.colorSpace != config_[index].colorSpace) {
- cfg.colorSpace = config_[index].colorSpace;
+ if (cfg.colorSpace != ColorSpace::Raw &&
+ cfg.colorSpace != colorSpace) {
+ cfg.colorSpace = colorSpace;
status = Adjusted;
}
}
@@ -399,17 +523,35 @@ CameraConfiguration::Status CameraConfiguration::validateColorSpaces(ColorSpaceF
}
/**
- * \var CameraConfiguration::transform
- * \brief User-specified transform to be applied to the image
+ * \var CameraConfiguration::sensorConfig
+ * \brief The camera sensor configuration
+ *
+ * The sensorConfig member allows manual control of the configuration of the
+ * camera sensor. By default, if sensorConfig is not set, the camera will
+ * configure the sensor automatically based on the configuration of the streams.
+ * Applications can override this by manually specifying the full sensor
+ * configuration.
+ *
+ * Refer to the camera-sensor-model documentation and to the SensorConfiguration
+ * class documentation for details about the sensor configuration process.
+ *
+ * The camera sensor configuration applies to all streams produced by a camera
+ * from the same image source.
+ */
+
+/**
+ * \var CameraConfiguration::orientation
+ * \brief The desired orientation of the images produced by the camera
+ *
+ * The orientation field is a user-specified 2D plane transformation that
+ * specifies how the application wants the camera images to be rotated in
+ * the memory buffers.
*
- * The transform is a user-specified 2D plane transform that will be applied
- * to the camera images by the processing pipeline before being handed to
- * the application. This is subsequent to any transform that is already
- * required to fix up any platform-defined rotation.
+ * If the orientation requested by the application cannot be obtained, the
+ * camera will not rotate or flip the images, and the validate() function will
+ * adjust this value to the native image orientation produced by the camera.
*
- * The usual 2D plane transforms are allowed here (horizontal/vertical
- * flips, multiple of 90-degree rotations etc.), but the validate() function
- * may adjust this field at its discretion if the selection is not supported.
+ * By default the orientation field is set to Orientation::Rotate0.
*/
/**
@@ -497,7 +639,7 @@ Camera::Private::~Private()
* facilitate debugging of internal request usage.
*
* The requestSequence_ tracks the number of requests queued to a camera
- * over its lifetime.
+ * over a single capture session.
*/
static const char *const camera_state_names[] = {
@@ -508,6 +650,11 @@ static const char *const camera_state_names[] = {
"Running",
};
+bool Camera::Private::isAcquired() const
+{
+ return state_.load(std::memory_order_acquire) != CameraAvailable;
+}
+
bool Camera::Private::isRunning() const
{
return state_.load(std::memory_order_acquire) == CameraRunning;
@@ -811,7 +958,7 @@ int Camera::exportFrameBuffers(Stream *stream,
* not blocking, if the device has already been acquired (by the same or another
* process) the -EBUSY error code is returned.
*
- * Acquiring a camera will limit usage of any other camera(s) provided by the
+ * Acquiring a camera may limit usage of any other camera(s) provided by the
* same pipeline handler to the same instance of libcamera. The limit is in
* effect until all cameras from the pipeline handler are released. Other
* instances of libcamera can still list and examine the cameras but will fail
@@ -839,7 +986,7 @@ int Camera::acquire()
if (ret < 0)
return ret == -EACCES ? -EBUSY : ret;
- if (!d->pipe_->lock()) {
+ if (!d->pipe_->acquire()) {
LOG(Camera, Info)
<< "Pipeline handler in use by another process";
return -EBUSY;
@@ -873,7 +1020,8 @@ int Camera::release()
if (ret < 0)
return ret == -EACCES ? -EBUSY : ret;
- d->pipe_->unlock();
+ if (d->isAcquired())
+ d->pipe_->release(this);
d->setState(Private::CameraAvailable);
@@ -937,10 +1085,9 @@ const std::set<Stream *> &Camera::streams() const
* \context This function is \threadsafe.
*
* \return A CameraConfiguration if the requested roles can be satisfied, or a
- * null pointer otherwise. The ownership of the returned configuration is
- * passed to the caller.
+ * null pointer otherwise.
*/
-std::unique_ptr<CameraConfiguration> Camera::generateConfiguration(const StreamRoles &roles)
+std::unique_ptr<CameraConfiguration> Camera::generateConfiguration(Span<const StreamRole> roles)
{
Private *const d = _d();
@@ -952,7 +1099,8 @@ std::unique_ptr<CameraConfiguration> Camera::generateConfiguration(const StreamR
if (roles.size() > streams().size())
return nullptr;
- CameraConfiguration *config = d->pipe_->generateConfiguration(this, roles);
+ std::unique_ptr<CameraConfiguration> config =
+ d->pipe_->generateConfiguration(this, roles);
if (!config) {
LOG(Camera, Debug)
<< "Pipeline handler failed to generate configuration";
@@ -969,10 +1117,16 @@ std::unique_ptr<CameraConfiguration> Camera::generateConfiguration(const StreamR
LOG(Camera, Debug) << msg.str();
- return std::unique_ptr<CameraConfiguration>(config);
+ return config;
}
/**
+ * \fn std::unique_ptr<CameraConfiguration> \
+ * Camera::generateConfiguration(std::initializer_list<StreamRole> roles)
+ * \overload
+ */
+
+/**
* \brief Configure the camera prior to capture
* \param[in] config The camera configurations to setup
*
@@ -1127,6 +1281,11 @@ int Camera::queueRequest(Request *request)
return -EXDEV;
}
+ if (request->status() != Request::RequestPending) {
+ LOG(Camera, Error) << request->toString() << " is not valid";
+ return -EINVAL;
+ }
+
/*
* The camera state may change until the end of the function. No locking
* is however needed as PipelineHandler::queueRequest() will handle
@@ -1181,6 +1340,8 @@ int Camera::start(const ControlList *controls)
LOG(Camera, Debug) << "Starting capture";
+ ASSERT(d->requestSequence_ == 0);
+
ret = d->pipe_->invokeMethod(&PipelineHandler::start,
ConnectionTypeBlocking, this, controls);
if (ret)
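Putting the API changes in this file together, a hedged application-side sketch of the new calling convention: initializer-list roles, a caller-owned configuration, and orientation in place of the removed transform field:

std::unique_ptr<CameraConfiguration> config =
	camera->generateConfiguration({ StreamRole::Viewfinder });
if (!config)
	return;

/* Request an upside-down image; validate() may adjust this. */
config->orientation = Orientation::Rotate180;

if (config->validate() == CameraConfiguration::Invalid)
	return;

camera->configure(config.get());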
diff --git a/src/libcamera/camera_controls.cpp b/src/libcamera/camera_controls.cpp
index cabdcf75..b672c7cf 100644
--- a/src/libcamera/camera_controls.cpp
+++ b/src/libcamera/camera_controls.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_controls.cpp - Camera controls
+ * Camera controls
*/
#include "libcamera/internal/camera_controls.h"
diff --git a/src/libcamera/camera_lens.cpp b/src/libcamera/camera_lens.cpp
index b3d48199..ccc2a6a6 100644
--- a/src/libcamera/camera_lens.cpp
+++ b/src/libcamera/camera_lens.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * camera_lens.cpp - A camera lens
+ * A camera lens
*/
#include "libcamera/internal/camera_lens.h"
diff --git a/src/libcamera/camera_manager.cpp b/src/libcamera/camera_manager.cpp
index 70d73822..95a9e326 100644
--- a/src/libcamera/camera_manager.cpp
+++ b/src/libcamera/camera_manager.cpp
@@ -2,77 +2,38 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * camera_manager.h - Camera management
+ * Camera management
*/
-#include <libcamera/camera_manager.h>
-
-#include <map>
-
-#include <libcamera/camera.h>
+#include "libcamera/internal/camera_manager.h"
#include <libcamera/base/log.h>
-#include <libcamera/base/mutex.h>
-#include <libcamera/base/thread.h>
#include <libcamera/base/utils.h>
+#include <libcamera/camera.h>
+#include <libcamera/property_ids.h>
+
+#include "libcamera/internal/camera.h"
#include "libcamera/internal/device_enumerator.h"
-#include "libcamera/internal/ipa_manager.h"
#include "libcamera/internal/pipeline_handler.h"
-#include "libcamera/internal/process.h"
/**
- * \file camera_manager.h
+ * \file libcamera/camera_manager.h
* \brief The camera manager
*/
/**
+ * \file libcamera/internal/camera_manager.h
+ * \brief Internal camera manager support
+ */
+
+/**
* \brief Top-level libcamera namespace
*/
namespace libcamera {
LOG_DEFINE_CATEGORY(Camera)
-class CameraManager::Private : public Extensible::Private, public Thread
-{
- LIBCAMERA_DECLARE_PUBLIC(CameraManager)
-
-public:
- Private();
-
- int start();
- void addCamera(std::shared_ptr<Camera> camera,
- const std::vector<dev_t> &devnums);
- void removeCamera(Camera *camera);
-
- /*
- * This mutex protects
- *
- * - initialized_ and status_ during initialization
- * - cameras_ and camerasByDevnum_ after initialization
- */
- mutable Mutex mutex_;
- std::vector<std::shared_ptr<Camera>> cameras_;
- std::map<dev_t, std::weak_ptr<Camera>> camerasByDevnum_;
-
-protected:
- void run() override;
-
-private:
- int init();
- void createPipelineHandlers();
- void cleanup();
-
- ConditionVariable cv_;
- bool initialized_;
- int status_;
-
- std::unique_ptr<DeviceEnumerator> enumerator_;
-
- IPAManager ipaManager_;
- ProcessManager processManager_;
-};
-
CameraManager::Private::Private()
: initialized_(false)
{
@@ -87,7 +48,9 @@ int CameraManager::Private::start()
{
MutexLocker locker(mutex_);
- cv_.wait(locker, [&] { return initialized_; });
+ cv_.wait(locker, [&]() LIBCAMERA_TSA_REQUIRES(mutex_) {
+ return initialized_;
+ });
status = status_;
}
@@ -129,23 +92,45 @@ int CameraManager::Private::init()
return -ENODEV;
createPipelineHandlers();
+ enumerator_->devicesAdded.connect(this, &Private::createPipelineHandlers);
return 0;
}
void CameraManager::Private::createPipelineHandlers()
{
- CameraManager *const o = LIBCAMERA_O_PTR();
-
/*
* \todo Try to read handlers and order from configuration
- * file and only fallback on all handlers if there is no
- * configuration file.
+ * file and only fallback on environment variable or all handlers, if
+ * there is no configuration file.
*/
- std::vector<PipelineHandlerFactory *> &factories =
- PipelineHandlerFactory::factories();
+ const char *pipesList =
+ utils::secure_getenv("LIBCAMERA_PIPELINES_MATCH_LIST");
+ if (pipesList) {
+ /*
+ * When a list of preferred pipelines is defined, iterate
+ * through the ordered list to match the enumerated devices.
+ */
+ for (const auto &pipeName : utils::split(pipesList, ",")) {
+ const PipelineHandlerFactoryBase *factory;
+ factory = PipelineHandlerFactoryBase::getFactoryByName(pipeName);
+ if (!factory)
+ continue;
+
+ LOG(Camera, Debug)
+ << "Found listed pipeline handler '"
+ << pipeName << "'";
+ pipelineFactoryMatch(factory);
+ }
+
+ return;
+ }
+
+ const std::vector<PipelineHandlerFactoryBase *> &factories =
+ PipelineHandlerFactoryBase::factories();
- for (PipelineHandlerFactory *factory : factories) {
+ /* Match all the registered pipeline handlers. */
+ for (const PipelineHandlerFactoryBase *factory : factories) {
LOG(Camera, Debug)
<< "Found registered pipeline handler '"
<< factory->name() << "'";
@@ -153,18 +138,24 @@ void CameraManager::Private::createPipelineHandlers()
* Try each pipeline handler until it exhausts
* all pipelines it can provide.
*/
- while (1) {
- std::shared_ptr<PipelineHandler> pipe = factory->create(o);
- if (!pipe->match(enumerator_.get()))
- break;
-
- LOG(Camera, Debug)
- << "Pipeline handler \"" << factory->name()
- << "\" matched";
- }
+ pipelineFactoryMatch(factory);
}
+}
- enumerator_->devicesAdded.connect(this, &Private::createPipelineHandlers);
+void CameraManager::Private::pipelineFactoryMatch(const PipelineHandlerFactoryBase *factory)
+{
+ CameraManager *const o = LIBCAMERA_O_PTR();
+
+ /* Provide as many matching pipelines as possible. */
+ while (1) {
+ std::shared_ptr<PipelineHandler> pipe = factory->create(o);
+ if (!pipe->match(enumerator_.get()))
+ break;
+
+ LOG(Camera, Debug)
+ << "Pipeline handler \"" << factory->name()
+ << "\" matched";
+ }
}
void CameraManager::Private::cleanup()
@@ -178,18 +169,36 @@ void CameraManager::Private::cleanup()
* process deletion requests from the thread's message queue as the event
* loop is not in action here.
*/
- cameras_.clear();
+ {
+ MutexLocker locker(mutex_);
+ cameras_.clear();
+ }
+
dispatchMessages(Message::Type::DeferredDelete);
enumerator_.reset(nullptr);
}
-void CameraManager::Private::addCamera(std::shared_ptr<Camera> camera,
- const std::vector<dev_t> &devnums)
+/**
+ * \brief Add a camera to the camera manager
+ * \param[in] camera The camera to be added
+ *
+ * This function is called by pipeline handlers to register the cameras they
+ * handle with the camera manager. Registered cameras are immediately made
+ * available to the system.
+ *
+ * Device numbers from the SystemDevices property are used by the V4L2
+ * compatibility layer to map V4L2 device nodes to Camera instances.
+ *
+ * \context This function shall be called from the CameraManager thread.
+ */
+void CameraManager::Private::addCamera(std::shared_ptr<Camera> camera)
{
+ ASSERT(Thread::current() == this);
+
MutexLocker locker(mutex_);
- for (std::shared_ptr<Camera> c : cameras_) {
+ for (const std::shared_ptr<Camera> &c : cameras_) {
if (c->id() == camera->id()) {
LOG(Camera, Fatal)
<< "Trying to register a camera with a duplicated ID '"
@@ -201,17 +210,31 @@ void CameraManager::Private::addCamera(std::shared_ptr<Camera> camera,
cameras_.push_back(std::move(camera));
unsigned int index = cameras_.size() - 1;
- for (dev_t devnum : devnums)
- camerasByDevnum_[devnum] = cameras_[index];
+
+ /* Report the addition to the public signal */
+ CameraManager *const o = LIBCAMERA_O_PTR();
+ o->cameraAdded.emit(cameras_[index]);
}
-void CameraManager::Private::removeCamera(Camera *camera)
+/**
+ * \brief Remove a camera from the camera manager
+ * \param[in] camera The camera to be removed
+ *
+ * This function is called by pipeline handlers to unregister cameras from the
+ * camera manager. Unregistered cameras won't be reported anymore by the
+ * cameras() and get() calls, but references may still exist in applications.
+ *
+ * \context This function shall be called from the CameraManager thread.
+ */
+void CameraManager::Private::removeCamera(std::shared_ptr<Camera> camera)
{
+ ASSERT(Thread::current() == this);
+
MutexLocker locker(mutex_);
auto iter = std::find_if(cameras_.begin(), cameras_.end(),
[camera](std::shared_ptr<Camera> &c) {
- return c.get() == camera;
+ return c.get() == camera.get();
});
if (iter == cameras_.end())
return;
@@ -219,14 +242,11 @@ void CameraManager::Private::removeCamera(Camera *camera)
LOG(Camera, Debug)
<< "Unregistering camera '" << camera->id() << "'";
- auto iter_d = std::find_if(camerasByDevnum_.begin(), camerasByDevnum_.end(),
- [camera](const std::pair<dev_t, std::weak_ptr<Camera>> &p) {
- return p.second.lock().get() == camera;
- });
- if (iter_d != camerasByDevnum_.end())
- camerasByDevnum_.erase(iter_d);
-
cameras_.erase(iter);
+
+ /* Report the removal to the public signal */
+ CameraManager *const o = LIBCAMERA_O_PTR();
+ o->cameraRemoved.emit(camera);
}
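
For reference, a brief sketch of how an application could observe these internal registrations and removals through the public signals; the free-function slots and log output are illustrative only:

    #include <iostream>
    #include <memory>

    #include <libcamera/camera.h>
    #include <libcamera/camera_manager.h>

    static void onCameraAdded(std::shared_ptr<libcamera::Camera> cam)
    {
        std::cout << "Camera added: " << cam->id() << std::endl;
    }

    static void onCameraRemoved(std::shared_ptr<libcamera::Camera> cam)
    {
        std::cout << "Camera removed: " << cam->id() << std::endl;
    }

    void watchHotplug(libcamera::CameraManager &cm)
    {
        /* Emitted from addCamera() / removeCamera() above. */
        cm.cameraAdded.connect(&onCameraAdded);
        cm.cameraRemoved.connect(&onCameraRemoved);
    }
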
/**
@@ -363,35 +383,6 @@ std::shared_ptr<Camera> CameraManager::get(const std::string &id)
}
/**
- * \brief Retrieve a camera based on device number
- * \param[in] devnum Device number of camera to get
- *
- * This function is meant solely for the use of the V4L2 compatibility
- * layer, to map device nodes to Camera instances. Applications shall
- * not use it and shall instead retrieve cameras by name.
- *
- * Before calling this function the caller is responsible for ensuring that
- * the camera manager is running.
- *
- * \context This function is \threadsafe.
- *
- * \return Shared pointer to Camera object, which is empty if the camera is
- * not found
- */
-std::shared_ptr<Camera> CameraManager::get(dev_t devnum)
-{
- Private *const d = _d();
-
- MutexLocker locker(d->mutex_);
-
- auto iter = d->camerasByDevnum_.find(devnum);
- if (iter == d->camerasByDevnum_.end())
- return nullptr;
-
- return iter->second.lock();
-}
-
-/**
* \var CameraManager::cameraAdded
* \brief Notify of a new camera added to the system
*
@@ -420,51 +411,6 @@ std::shared_ptr<Camera> CameraManager::get(dev_t devnum)
*/
/**
- * \brief Add a camera to the camera manager
- * \param[in] camera The camera to be added
- * \param[in] devnums The device numbers to associate with \a camera
- *
- * This function is called by pipeline handlers to register the cameras they
- * handle with the camera manager. Registered cameras are immediately made
- * available to the system.
- *
- * \a devnums are used by the V4L2 compatibility layer to map V4L2 device nodes
- * to Camera instances.
- *
- * \context This function shall be called from the CameraManager thread.
- */
-void CameraManager::addCamera(std::shared_ptr<Camera> camera,
- const std::vector<dev_t> &devnums)
-{
- Private *const d = _d();
-
- ASSERT(Thread::current() == d);
-
- d->addCamera(camera, devnums);
- cameraAdded.emit(camera);
-}
-
-/**
- * \brief Remove a camera from the camera manager
- * \param[in] camera The camera to be removed
- *
- * This function is called by pipeline handlers to unregister cameras from the
- * camera manager. Unregistered cameras won't be reported anymore by the
- * cameras() and get() calls, but references may still exist in applications.
- *
- * \context This function shall be called from the CameraManager thread.
- */
-void CameraManager::removeCamera(std::shared_ptr<Camera> camera)
-{
- Private *const d = _d();
-
- ASSERT(Thread::current() == d);
-
- d->removeCamera(camera.get());
- cameraRemoved.emit(camera);
-}
-
-/**
* \fn const std::string &CameraManager::version()
* \brief Retrieve the libcamera version string
* \context This function is \a threadsafe.
diff --git a/src/libcamera/color_space.cpp b/src/libcamera/color_space.cpp
index 895e5c8e..3d1c456c 100644
--- a/src/libcamera/color_space.cpp
+++ b/src/libcamera/color_space.cpp
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
- * Copyright (C) 2021, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2021, Raspberry Pi Ltd
*
- * color_space.cpp - color spaces.
+ * color spaces.
*/
#include <libcamera/color_space.h>
@@ -12,6 +12,11 @@
#include <map>
#include <sstream>
#include <utility>
+#include <vector>
+
+#include <libcamera/base/utils.h>
+
+#include "libcamera/internal/formats.h"
/**
* \file color_space.h
@@ -29,21 +34,33 @@ namespace libcamera {
* (sometimes also referred to as the quantisation) of the color space.
*
* Certain combinations of these fields form well-known standard color
- * spaces such as "JPEG" or "REC709".
+ * spaces such as "sRGB" or "Rec709".
*
* In the strictest sense a "color space" formally only refers to the
* color primaries and white point. Here, however, the ColorSpace class
* adopts the common broader usage that includes the transfer function,
* Y'CbCr encoding method and quantisation.
*
- * For more information on the specific color spaces described here, please
- * see:
+ * More information on color spaces is available in the V4L2 documentation, see
+ * in particular
*
* - <a href="https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/colorspaces-details.html#col-srgb">sRGB</a>
* - <a href="https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/colorspaces-details.html#col-jpeg">JPEG</a>
* - <a href="https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/colorspaces-details.html#col-smpte-170m">SMPTE 170M</a>
* - <a href="https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/colorspaces-details.html#col-rec709">Rec.709</a>
* - <a href="https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/colorspaces-details.html#col-bt2020">Rec.2020</a>
+ *
+ * Note that there is no guarantee of a 1:1 mapping between color space names
+ * and definitions in libcamera and V4L2. Two notable differences are
+ *
+ * - The sRGB libcamera color space is defined for RGB formats only with no
+ * Y'CbCr encoding and a full quantization range, while the V4L2 SRGB color
+ * space has a Y'CbCr encoding and a limited quantization range.
+ * - The sYCC libcamera color space is called JPEG in V4L2 due to historical
+ * reasons.
+ *
+ * \todo Define the color space fully in the libcamera API to avoid referencing
+ * V4L2
*/
/**
@@ -118,11 +135,130 @@ namespace libcamera {
*/
/**
+ * \brief A constant representing a raw color space (from a sensor)
+ */
+const ColorSpace ColorSpace::Raw = {
+ Primaries::Raw,
+ TransferFunction::Linear,
+ YcbcrEncoding::None,
+ Range::Full
+};
+
+/**
+ * \brief A constant representing the sRGB color space (RGB formats only)
+ */
+const ColorSpace ColorSpace::Srgb = {
+ Primaries::Rec709,
+ TransferFunction::Srgb,
+ YcbcrEncoding::None,
+ Range::Full
+};
+
+/**
+ * \brief A constant representing the sYCC color space, typically used for
+ * encoding JPEG images
+ */
+const ColorSpace ColorSpace::Sycc = {
+ Primaries::Rec709,
+ TransferFunction::Srgb,
+ YcbcrEncoding::Rec601,
+ Range::Full
+};
+
+/**
+ * \brief A constant representing the SMPTE170M color space
+ */
+const ColorSpace ColorSpace::Smpte170m = {
+ Primaries::Smpte170m,
+ TransferFunction::Rec709,
+ YcbcrEncoding::Rec601,
+ Range::Limited
+};
+
+/**
+ * \brief A constant representing the Rec.709 color space
+ */
+const ColorSpace ColorSpace::Rec709 = {
+ Primaries::Rec709,
+ TransferFunction::Rec709,
+ YcbcrEncoding::Rec709,
+ Range::Limited
+};
+
+/**
+ * \brief A constant representing the Rec.2020 color space
+ */
+const ColorSpace ColorSpace::Rec2020 = {
+ Primaries::Rec2020,
+ TransferFunction::Rec709,
+ YcbcrEncoding::Rec2020,
+ Range::Limited
+};
+
+/**
+ * \var ColorSpace::primaries
+ * \brief The color primaries of this color space
+ */
+
+/**
+ * \var ColorSpace::transferFunction
+ * \brief The transfer function used by this color space
+ */
+
+/**
+ * \var ColorSpace::ycbcrEncoding
+ * \brief The Y'CbCr encoding used by this color space
+ */
+
+/**
+ * \var ColorSpace::range
+ * \brief The pixel range used by this color space
+ */
+
+namespace {
+
+const std::array<std::pair<ColorSpace, const char *>, 6> colorSpaceNames = { {
+ { ColorSpace::Raw, "RAW" },
+ { ColorSpace::Srgb, "sRGB" },
+ { ColorSpace::Sycc, "sYCC" },
+ { ColorSpace::Smpte170m, "SMPTE170M" },
+ { ColorSpace::Rec709, "Rec709" },
+ { ColorSpace::Rec2020, "Rec2020" },
+} };
+
+const std::map<ColorSpace::Primaries, std::string> primariesNames = {
+ { ColorSpace::Primaries::Raw, "RAW" },
+ { ColorSpace::Primaries::Smpte170m, "SMPTE170M" },
+ { ColorSpace::Primaries::Rec709, "Rec709" },
+ { ColorSpace::Primaries::Rec2020, "Rec2020" },
+};
+
+const std::map<ColorSpace::TransferFunction, std::string> transferNames = {
+ { ColorSpace::TransferFunction::Linear, "Linear" },
+ { ColorSpace::TransferFunction::Srgb, "sRGB" },
+ { ColorSpace::TransferFunction::Rec709, "Rec709" },
+};
+
+const std::map<ColorSpace::YcbcrEncoding, std::string> encodingNames = {
+ { ColorSpace::YcbcrEncoding::None, "None" },
+ { ColorSpace::YcbcrEncoding::Rec601, "Rec601" },
+ { ColorSpace::YcbcrEncoding::Rec709, "Rec709" },
+ { ColorSpace::YcbcrEncoding::Rec2020, "Rec2020" },
+};
+
+const std::map<ColorSpace::Range, std::string> rangeNames = {
+ { ColorSpace::Range::Full, "Full" },
+ { ColorSpace::Range::Limited, "Limited" },
+};
+
+} /* namespace */
+
+/**
* \brief Assemble and return a readable string representation of the
* ColorSpace
*
- * If the color space matches a standard ColorSpace (such as ColorSpace::Jpeg)
- * then the short name of the color space ("JPEG") is returned. Otherwise
+ * If the color space matches a standard ColorSpace (such as ColorSpace::Sycc)
+ * then the short name of the color space ("sYCC") is returned. Otherwise
* the four constituent parts of the ColorSpace are assembled into a longer
* string.
*
@@ -132,14 +268,6 @@ std::string ColorSpace::toString() const
{
/* Print out a brief name only for standard color spaces. */
- static const std::array<std::pair<ColorSpace, const char *>, 6> colorSpaceNames = { {
- { ColorSpace::Raw, "RAW" },
- { ColorSpace::Jpeg, "JPEG" },
- { ColorSpace::Srgb, "sRGB" },
- { ColorSpace::Smpte170m, "SMPTE170M" },
- { ColorSpace::Rec709, "Rec709" },
- { ColorSpace::Rec2020, "Rec2020" },
- } };
auto it = std::find_if(colorSpaceNames.begin(), colorSpaceNames.end(),
[this](const auto &item) {
return *this == item.first;
@@ -149,28 +277,6 @@ std::string ColorSpace::toString() const
/* Assemble a name made of the constituent fields. */
- static const std::map<Primaries, std::string> primariesNames = {
- { Primaries::Raw, "RAW" },
- { Primaries::Smpte170m, "SMPTE170M" },
- { Primaries::Rec709, "Rec709" },
- { Primaries::Rec2020, "Rec2020" },
- };
- static const std::map<TransferFunction, std::string> transferNames = {
- { TransferFunction::Linear, "Linear" },
- { TransferFunction::Srgb, "sRGB" },
- { TransferFunction::Rec709, "Rec709" },
- };
- static const std::map<YcbcrEncoding, std::string> encodingNames = {
- { YcbcrEncoding::None, "None" },
- { YcbcrEncoding::Rec601, "Rec601" },
- { YcbcrEncoding::Rec709, "Rec709" },
- { YcbcrEncoding::Rec2020, "Rec2020" },
- };
- static const std::map<Range, std::string> rangeNames = {
- { Range::Full, "Full" },
- { Range::Limited, "Limited" },
- };
-
auto itPrimaries = primariesNames.find(primaries);
std::string primariesName =
itPrimaries == primariesNames.end() ? "Invalid" : itPrimaries->second;
@@ -213,88 +319,185 @@ std::string ColorSpace::toString(const std::optional<ColorSpace> &colorSpace)
}
/**
- * \var ColorSpace::primaries
- * \brief The color primaries of this color space
- */
-
-/**
- * \var ColorSpace::transferFunction
- * \brief The transfer function used by this color space
- */
-
-/**
- * \var ColorSpace::ycbcrEncoding
- * \brief The Y'CbCr encoding used by this color space
- */
-
-/**
- * \var ColorSpace::range
- * \brief The pixel range used with by color space
- */
-
-/**
- * \brief A constant representing a raw color space (from a sensor)
- */
-const ColorSpace ColorSpace::Raw = {
- Primaries::Raw,
- TransferFunction::Linear,
- YcbcrEncoding::None,
- Range::Full
-};
-
-/**
- * \brief A constant representing the JPEG color space used for
- * encoding JPEG images
- */
-const ColorSpace ColorSpace::Jpeg = {
- Primaries::Rec709,
- TransferFunction::Srgb,
- YcbcrEncoding::Rec601,
- Range::Full
-};
-
-/**
- * \brief A constant representing the sRGB color space
+ * \brief Construct a color space from a string
+ * \param[in] str The string
*
- * This is identical to the JPEG color space except that the Y'CbCr
- * range is limited rather than full.
- */
-const ColorSpace ColorSpace::Srgb = {
- Primaries::Rec709,
- TransferFunction::Srgb,
- YcbcrEncoding::Rec601,
- Range::Limited
-};
-
-/**
- * \brief A constant representing the SMPTE170M color space
- */
-const ColorSpace ColorSpace::Smpte170m = {
- Primaries::Smpte170m,
- TransferFunction::Rec709,
- YcbcrEncoding::Rec601,
- Range::Limited
-};
-
-/**
- * \brief A constant representing the Rec.709 color space
+ * The string \a str can contain the name of a well-known color space, or be
+ * made of the four color space components separated by a '/' character, ordered
+ * as
+ *
+ * \verbatim primaries '/' transferFunction '/' ycbcrEncoding '/' range \endverbatim
+ *
+ * Any failure to parse the string, either because it doesn't match the expected
+ * format, or because one of the names isn't recognized, will cause this
+ * function to return std::nullopt.
+ *
+ * \return The ColorSpace corresponding to the string, or std::nullopt if the
+ * string doesn't describe a known color space
*/
-const ColorSpace ColorSpace::Rec709 = {
- Primaries::Rec709,
- TransferFunction::Rec709,
- YcbcrEncoding::Rec709,
- Range::Limited
-};
+std::optional<ColorSpace> ColorSpace::fromString(const std::string &str)
+{
+ /* First search for a standard color space name match. */
+ auto itColorSpace = std::find_if(colorSpaceNames.begin(), colorSpaceNames.end(),
+ [&str](const auto &item) {
+ return str == item.second;
+ });
+ if (itColorSpace != colorSpaceNames.end())
+ return itColorSpace->first;
+
+ /*
+ * If not found, the string must contain the four color space
+ * components separated by a '/' character.
+ */
+ const auto &split = utils::split(str, "/");
+ std::vector<std::string> components{ split.begin(), split.end() };
+
+ if (components.size() != 4)
+ return std::nullopt;
+
+ ColorSpace colorSpace = ColorSpace::Raw;
+
+ /* Color primaries */
+ auto itPrimaries = std::find_if(primariesNames.begin(), primariesNames.end(),
+ [&components](const auto &item) {
+ return components[0] == item.second;
+ });
+ if (itPrimaries == primariesNames.end())
+ return std::nullopt;
+
+ colorSpace.primaries = itPrimaries->first;
+
+ /* Transfer function */
+ auto itTransfer = std::find_if(transferNames.begin(), transferNames.end(),
+ [&components](const auto &item) {
+ return components[1] == item.second;
+ });
+ if (itTransfer == transferNames.end())
+ return std::nullopt;
+
+ colorSpace.transferFunction = itTransfer->first;
+
+ /* YCbCr encoding */
+ auto itEncoding = std::find_if(encodingNames.begin(), encodingNames.end(),
+ [&components](const auto &item) {
+ return components[2] == item.second;
+ });
+ if (itEncoding == encodingNames.end())
+ return std::nullopt;
+
+ colorSpace.ycbcrEncoding = itEncoding->first;
+
+ /* Quantization range */
+ auto itRange = std::find_if(rangeNames.begin(), rangeNames.end(),
+ [&components](const auto &item) {
+ return components[3] == item.second;
+ });
+ if (itRange == rangeNames.end())
+ return std::nullopt;
+
+ colorSpace.range = itRange->first;
+
+ return colorSpace;
+}
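
A short usage sketch of the parser above; the component spellings follow the name tables defined earlier in this file:

    #include <cassert>
    #include <optional>

    #include <libcamera/color_space.h>

    using namespace libcamera;

    void fromStringExamples()
    {
        /* A well-known short name resolves to the matching constant. */
        std::optional<ColorSpace> sycc = ColorSpace::fromString("sYCC");
        assert(sycc == ColorSpace::Sycc);

        /* Four '/'-separated components are parsed individually. */
        std::optional<ColorSpace> custom =
            ColorSpace::fromString("SMPTE170M/sRGB/Rec601/Full");
        assert(custom.has_value());

        /* Any unrecognized name fails the whole parse. */
        assert(!ColorSpace::fromString("NotAColorSpace"));
    }
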
/**
- * \brief A constant representing the Rec.2020 color space
+ * \brief Adjust the color space to match a pixel format
+ * \param[in] format The pixel format
+ *
+ * Not all combinations of pixel formats and color spaces make sense. For
+ * instance, nobody uses a limited quantization range with raw Bayer formats,
+ * and the YcbcrEncoding::None encoding isn't valid for YUV formats. This
+ * function adjusts the ColorSpace to make it compatible with the given \a
+ * format, by applying the following rules:
+ *
+ * - The color space for RAW formats must be Raw.
+ * - The Y'CbCr encoding and quantization range for RGB formats must be
+ * YcbcrEncoding::None and Range::Full respectively.
+ * - The Y'CbCr encoding for YUV formats must not be YcbcrEncoding::None. The
+ * best encoding is in that case guessed based on the primaries and transfer
+ * function.
+ *
+ * \return True if the color space has been adjusted, or false if it was
+ * already compatible with the format and hasn't been changed
*/
-const ColorSpace ColorSpace::Rec2020 = {
- Primaries::Rec2020,
- TransferFunction::Rec709,
- YcbcrEncoding::Rec2020,
- Range::Limited
-};
+bool ColorSpace::adjust(PixelFormat format)
+{
+ const PixelFormatInfo &info = PixelFormatInfo::info(format);
+ bool adjusted = false;
+
+ switch (info.colourEncoding) {
+ case PixelFormatInfo::ColourEncodingRAW:
+ /* Raw formats must use the raw color space. */
+ if (*this != ColorSpace::Raw) {
+ *this = ColorSpace::Raw;
+ adjusted = true;
+ }
+ break;
+
+ case PixelFormatInfo::ColourEncodingRGB:
+ /*
+ * RGB formats can't have a Y'CbCr encoding, and must use full
+ * range quantization.
+ */
+ if (ycbcrEncoding != YcbcrEncoding::None) {
+ ycbcrEncoding = YcbcrEncoding::None;
+ adjusted = true;
+ }
+
+ if (range != Range::Full) {
+ range = Range::Full;
+ adjusted = true;
+ }
+ break;
+
+ case PixelFormatInfo::ColourEncodingYUV:
+ if (ycbcrEncoding != YcbcrEncoding::None)
+ break;
+
+ /*
+ * YUV formats must have a Y'CbCr encoding. Infer the most
+ * probable option from the transfer function and primaries.
+ */
+ switch (transferFunction) {
+ case TransferFunction::Linear:
+ /*
+ * Linear YUV is not used in any standard color space,
+ * pick the widely supported and used Rec601 as default.
+ */
+ ycbcrEncoding = YcbcrEncoding::Rec601;
+ break;
+
+ case TransferFunction::Rec709:
+ switch (primaries) {
+ /* Raw should never happen. */
+ case Primaries::Raw:
+ case Primaries::Smpte170m:
+ ycbcrEncoding = YcbcrEncoding::Rec601;
+ break;
+ case Primaries::Rec709:
+ ycbcrEncoding = YcbcrEncoding::Rec709;
+ break;
+ case Primaries::Rec2020:
+ ycbcrEncoding = YcbcrEncoding::Rec2020;
+ break;
+ }
+ break;
+
+ case TransferFunction::Srgb:
+ /*
+ * Only the sYCC color space uses the sRGB transfer
+ * function, the corresponding encoding is Rec601.
+ */
+ ycbcrEncoding = YcbcrEncoding::Rec601;
+ break;
+ }
+
+ adjusted = true;
+ break;
+ }
+
+ return adjusted;
+}
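
To illustrate the rules above, a small sketch using pixel formats from libcamera's formats namespace; behaviour for other formats follows their PixelFormatInfo colour encoding:

    #include <libcamera/color_space.h>
    #include <libcamera/formats.h>

    using namespace libcamera;

    void adjustExamples()
    {
        /* RGB formats lose any Y'CbCr encoding and become full range. */
        ColorSpace cs = ColorSpace::Rec709;
        bool changed = cs.adjust(formats::RGB888);
        /* changed == true, cs.ycbcrEncoding == None, cs.range == Full */
        (void)changed;

        /* Raw Bayer formats are forced to the Raw color space. */
        cs = ColorSpace::Srgb;
        cs.adjust(formats::SRGGB10);
        /* cs == ColorSpace::Raw */
    }
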
/**
* \brief Compare color spaces for equality
diff --git a/src/libcamera/control_ids.cpp.in b/src/libcamera/control_ids.cpp.in
index 5fb1c2c3..d283c1c1 100644
--- a/src/libcamera/control_ids.cpp.in
+++ b/src/libcamera/control_ids.cpp.in
@@ -24,14 +24,7 @@ namespace controls {
${controls_doc}
-/**
- * \brief Namespace for libcamera draft controls
- */
-namespace draft {
-
-${draft_controls_doc}
-
-} /* namespace draft */
+${vendor_controls_doc}
#ifndef __DOXYGEN__
/*
@@ -40,11 +33,8 @@ ${draft_controls_doc}
*/
${controls_def}
-namespace draft {
-
-${draft_controls_def}
+${vendor_controls_def}
-} /* namespace draft */
#endif
/**
diff --git a/src/libcamera/control_ids.yaml b/src/libcamera/control_ids_core.yaml
index ecab3ae9..bf1f1a83 100644
--- a/src/libcamera/control_ids.yaml
+++ b/src/libcamera/control_ids_core.yaml
@@ -2,10 +2,11 @@
#
# Copyright (C) 2019, Google Inc.
#
-%YAML 1.2
+%YAML 1.1
---
# Unless otherwise stated, all controls are bi-directional, i.e. they can be
# set through Request::controls() and returned out through Request::metadata().
+vendor: libcamera
controls:
- AeEnable:
type: bool
@@ -156,6 +157,79 @@ controls:
control of which features should be automatically adjusted shouldn't
better be handled through a separate AE mode control.
+ - AeFlickerMode:
+ type: int32_t
+ description: |
+ Set the flicker mode, which determines whether, and how, the AGC/AEC
+ algorithm attempts to hide flicker effects caused by the duty cycle of
+ artificial lighting.
+
+ Although implementation dependent, many algorithms for "flicker
+ avoidance" work by restricting this exposure time to integer multiples
+ of the cycle period, wherever possible.
+
+ Implementations may not support all of the flicker modes listed below.
+
+ By default the system will start in FlickerAuto mode if this is
+ supported, otherwise the flicker mode will be set to FlickerOff.
+
+ enum:
+ - name: FlickerOff
+ value: 0
+ description: No flicker avoidance is performed.
+ - name: FlickerManual
+ value: 1
+ description: Manual flicker avoidance.
+ Suppress flicker effects caused by lighting running with a period
+ specified by the AeFlickerPeriod control.
+ \sa AeFlickerPeriod
+ - name: FlickerAuto
+ value: 2
+ description: Automatic flicker period detection and avoidance.
+ The system will automatically determine the most likely value of
+ flicker period, and avoid flicker of this frequency. Once flicker
+ is being corrected, it is implementation dependent whether the
+ system is still able to detect a change in the flicker period.
+ \sa AeFlickerDetected
+
+ - AeFlickerPeriod:
+ type: int32_t
+ description: Manual flicker period in microseconds.
+ This value sets the current flicker period to avoid. It is used when
+ AeFlickerMode is set to FlickerManual.
+
+ To cancel 50Hz mains flicker, this should be set to 10000 (corresponding
+ to 100Hz), or 8333 (120Hz) for 60Hz mains.
+
+ Setting the mode to FlickerManual when no AeFlickerPeriod has ever been
+ set means that no flicker cancellation occurs (until the value of this
+ control is updated).
+
+ Switching to modes other than FlickerManual has no effect on the
+ value of the AeFlickerPeriod control.
+
+ \sa AeFlickerMode
+
+ - AeFlickerDetected:
+ type: int32_t
+ description: Flicker period detected in microseconds.
+ The value reported here indicates the currently detected flicker
+ period, or zero if no flicker at all is detected.
+
+ When AeFlickerMode is set to FlickerAuto, there may be a period during
+ which the value reported here remains zero. Once a non-zero value is
+ reported, then this is the flicker period that has been detected and is
+ now being cancelled.
+
+ In the case of 50Hz mains flicker, the value would be 10000
+ (corresponding to 100Hz), or 8333 (120Hz) for 60Hz mains flicker.
+
+ It is implementation dependent whether the system can continue to detect
+ flicker of different periods when another frequency is already being
+ cancelled.
+
+ \sa AeFlickerMode
+
- Brightness:
type: float
description: |
@@ -275,12 +349,12 @@ controls:
type: int32_t
description: |
Reports a Figure of Merit (FoM) to indicate how in-focus the frame is.
- A larger FocusFoM value indicates a more in-focus frame. This control
- depends on the IPA to gather ISP statistics from the defined focus
- region, and combine them in a suitable way to generate a FocusFoM value.
- In this respect, it is not necessarily aimed at providing a way to
- implement a focus algorithm by the application, rather an indication of
- how in-focus a frame is.
+ A larger FocusFoM value indicates a more in-focus frame. This singular
+ value may be based on a combination of statistics gathered from
+ multiple focus regions within an image. The number of focus regions and
+ method of combination is platform dependent. In this respect, it is not
+ necessarily aimed at providing a way to implement a focus algorithm by
+ the application, rather an indication of how in-focus a frame is.
- ColourCorrectionMatrix:
type: float
@@ -291,7 +365,7 @@ controls:
transformation. The 3x3 matrix is stored in conventional reading
order in an array of 9 floating point values.
- size: [3x3]
+ size: [3,3]
- ScalerCrop:
type: Rectangle
@@ -333,8 +407,8 @@ controls:
- FrameDurationLimits:
type: int64_t
description: |
- The minimum and maximum (in that order) frame duration,
- expressed in microseconds.
+ The minimum and maximum (in that order) frame duration, expressed in
+ microseconds.
When provided by applications, the control specifies the sensor frame
duration interval the pipeline has to use. This limits the largest
@@ -343,7 +417,7 @@ controls:
the sensor will not be able to raise the exposure time above 33ms.
A fixed frame duration is achieved by setting the minimum and maximum
values to be the same. Setting both values to 0 reverts to using the
- IPA provided defaults.
+ camera defaults.
The maximum frame duration provides the absolute limit to the shutter
speed computed by the AE algorithm and it overrides any exposure mode
@@ -375,7 +449,7 @@ controls:
range of reported temperatures is device dependent.
The SensorTemperature control will only be returned in metadata if a
- themal sensor is present.
+ thermal sensor is present.
- SensorTimestamp:
type: int64_t
@@ -408,6 +482,13 @@ controls:
LensPosition control.
In this mode the AfState will always report AfStateIdle.
+
+ If the camera is started in AfModeManual, it will move the focus
+ lens to the position specified by the LensPosition control.
+
+ This mode is the recommended default value for the AfMode control.
+ External cameras (as reported by the Location property set to
+ CameraLocationExternal) may use a different default value.
- name: AfModeAuto
value: 1
description: |
@@ -591,25 +672,27 @@ controls:
AfModeManual, though the value is reported back unconditionally in all
modes.
- The units are a reciprocal distance scale like dioptres but normalised
- for the hyperfocal distance. That is, for a lens with hyperfocal
- distance H, and setting it to a focal distance D, the lens position LP,
- which is generally a non-integer, is given by
+ This value, which is generally a non-integer, is the reciprocal of the
+ focal distance in metres, also known as dioptres. That is, to set a
+ focal distance D, the lens position LP is given by
- \f$LP = \frac{H}{D}\f$
+ \f$LP = \frac{1\mathrm{m}}{D}\f$
For example:
0 moves the lens to infinity.
- 0.5 moves the lens to twice the hyperfocal distance.
- 1 moves the lens to the hyperfocal position.
- And larger values will focus the lens ever closer.
+ 0.5 moves the lens to focus on objects 2m away.
+ 2 moves the lens to focus on objects 50cm away.
+ And larger values will focus the lens closer.
- \todo Define a property to report the Hyperforcal distance of calibrated
- lenses.
+ The default value of the control should indicate a good general position
+ for the lens, often corresponding to the hyperfocal distance (the
+ closest position for which objects at infinity are still acceptably
+ sharp). The minimum will often be zero (meaning infinity), and the
+ maximum value defines the closest focus position.
- \todo Define a property to report the maximum and minimum positions of
- this lens. The minimum value will often be zero (meaning infinity).
+ \todo Define a property to report the Hyperfocal distance of calibrated
+ lenses.
- AfState:
type: int32_t
@@ -692,253 +775,94 @@ controls:
Continuous AF is paused. No further state changes or lens movements
will occur until the AfPauseResume control is sent.
- # ----------------------------------------------------------------------------
- # Draft controls section
-
- - AePrecaptureTrigger:
+ - HdrMode:
type: int32_t
- draft: true
description: |
- Control for AE metering trigger. Currently identical to
- ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER.
+ Control to set the mode to be used for High Dynamic Range (HDR)
+ imaging. HDR techniques typically include multiple exposure, image
+ fusion and tone mapping techniques to improve the dynamic range of the
+ resulting images.
- Whether the camera device will trigger a precapture metering sequence
- when it processes this request.
- enum:
- - name: AePrecaptureTriggerIdle
- value: 0
- description: The trigger is idle.
- - name: AePrecaptureTriggerStart
- value: 1
- description: The pre-capture AE metering is started by the camera.
- - name: AePrecaptureTriggerCancel
- value: 2
- description: |
- The camera will cancel any active or completed metering sequence.
- The AE algorithm is reset to its initial state.
+ When using an HDR mode, images are captured with different sets of AGC
+ settings called HDR channels. Channels indicate in particular the type
+ of exposure (short, medium or long) used to capture the raw image,
+ before fusion. Each HDR image is tagged with the corresponding channel
+ using the HdrChannel control.
- - NoiseReductionMode:
- type: int32_t
- draft: true
- description: |
- Control to select the noise reduction algorithm mode. Currently
- identical to ANDROID_NOISE_REDUCTION_MODE.
+ \sa HdrChannel
- Mode of operation for the noise reduction algorithm.
enum:
- - name: NoiseReductionModeOff
+ - name: HdrModeOff
value: 0
- description: No noise reduction is applied
- - name: NoiseReductionModeFast
+ description: |
+ HDR is disabled. Metadata for this frame will not include the
+ HdrChannel control.
+ - name: HdrModeMultiExposureUnmerged
value: 1
description: |
- Noise reduction is applied without reducing the frame rate.
- - name: NoiseReductionModeHighQuality
+ Multiple exposures will be generated in an alternating fashion.
+ However, they will not be merged together and will be returned to
+ the application as they are. Each image will be tagged with the
+ correct HDR channel, indicating what kind of exposure it is. The
+ tag should be the same as in the HdrModeMultiExposure case.
+
+ The expectation is that an application using this mode would merge
+ the frames to create HDR images for itself if it requires them.
+ - name: HdrModeMultiExposure
value: 2
description: |
- High quality noise reduction at the expense of frame rate.
- - name: NoiseReductionModeMinimal
+ Multiple exposures will be generated and merged to create HDR
+ images. Each image will be tagged with the HDR channel (long, medium
+ or short) that arrived and which caused this image to be output.
+
+ Systems that use two channels for HDR will return images tagged
+ alternately as the short and long channel. Systems that use three
+ channels for HDR will cycle through the short, medium and long
+ channel before repeating.
+ - name: HdrModeSingleExposure
value: 3
description: |
- Minimal noise reduction is applied without reducing the frame rate.
- - name: NoiseReductionModeZSL
- value: 4
- description: |
- Noise reduction is applied at different levels to different streams.
-
- - ColorCorrectionAberrationMode:
- type: int32_t
- draft: true
- description: |
- Control to select the color correction aberration mode. Currently
- identical to ANDROID_COLOR_CORRECTION_ABERRATION_MODE.
-
- Mode of operation for the chromatic aberration correction algorithm.
- enum:
- - name: ColorCorrectionAberrationOff
- value: 0
- description: No aberration correction is applied.
- - name: ColorCorrectionAberrationFast
- value: 1
- description: Aberration correction will not slow down the frame rate.
- - name: ColorCorrectionAberrationHighQuality
- value: 2
- description: |
- High quality aberration correction which might reduce the frame
- rate.
-
- - AeState:
- type: int32_t
- draft: true
- description: |
- Control to report the current AE algorithm state. Currently identical to
- ANDROID_CONTROL_AE_STATE.
-
- Current state of the AE algorithm.
- enum:
- - name: AeStateInactive
- value: 0
- description: The AE algorithm is inactive.
- - name: AeStateSearching
- value: 1
- description: The AE algorithm has not converged yet.
- - name: AeStateConverged
- value: 2
- description: The AE algorithm has converged.
- - name: AeStateLocked
- value: 3
- description: The AE algorithm is locked.
- - name: AeStateFlashRequired
+ Multiple frames all at a single exposure will be used to create HDR
+ images. These images should be reported as all corresponding to the
+ HDR short channel.
+ - name: HdrModeNight
value: 4
- description: The AE algorithm would need a flash for good results
- - name: AeStatePrecapture
- value: 5
description: |
- The AE algorithm has started a pre-capture metering session.
- \sa AePrecaptureTrigger
-
- - AwbState:
- type: int32_t
- draft: true
- description: |
- Control to report the current AWB algorithm state. Currently identical
- to ANDROID_CONTROL_AWB_STATE.
-
- Current state of the AWB algorithm.
- enum:
- - name: AwbStateInactive
- value: 0
- description: The AWB algorithm is inactive.
- - name: AwbStateSearching
- value: 1
- description: The AWB algorithm has not converged yet.
- - name: AwbConverged
- value: 2
- description: The AWB algorithm has converged.
- - name: AwbLocked
- value: 3
- description: The AWB algorithm is locked.
-
- - SensorRollingShutterSkew:
- type: int64_t
- draft: true
- description: |
- Control to report the time between the start of exposure of the first
- row and the start of exposure of the last row. Currently identical to
- ANDROID_SENSOR_ROLLING_SHUTTER_SKEW
-
- - LensShadingMapMode:
- type: int32_t
- draft: true
- description: |
- Control to report if the lens shading map is available. Currently
- identical to ANDROID_STATISTICS_LENS_SHADING_MAP_MODE.
- enum:
- - name: LensShadingMapModeOff
- value: 0
- description: No lens shading map mode is available.
- - name: LensShadingMapModeOn
- value: 1
- description: The lens shading map mode is available.
-
- - SceneFlicker:
- type: int32_t
- draft: true
- description: |
- Control to report the detected scene light frequency. Currently
- identical to ANDROID_STATISTICS_SCENE_FLICKER.
- enum:
- - name: SceneFickerOff
- value: 0
- description: No flickering detected.
- - name: SceneFicker50Hz
- value: 1
- description: 50Hz flickering detected.
- - name: SceneFicker60Hz
- value: 2
- description: 60Hz flickering detected.
+ Multiple frames will be combined to produce "night mode" images. It
+ is up to the implementation exactly which HDR channels it uses, and
+ the images will all be tagged accordingly with the correct HDR
+ channel information.
- - PipelineDepth:
+ - HdrChannel:
type: int32_t
- draft: true
description: |
- Specifies the number of pipeline stages the frame went through from when
- it was exposed to when the final completed result was available to the
- framework. Always less than or equal to PipelineMaxDepth. Currently
- identical to ANDROID_REQUEST_PIPELINE_DEPTH.
+ This value is reported back to the application so that it can discover
+ whether this capture corresponds to the short or long exposure image (or
+ any other image used by the HDR procedure). An application can monitor
+ the HDR channel to discover when the differently exposed images have
+ arrived.
- The typical value for this control is 3 as a frame is first exposed,
- captured and then processed in a single pass through the ISP. Any
- additional processing step performed after the ISP pass (in example face
- detection, additional format conversions etc) count as an additional
- pipeline stage.
+ This metadata is only available when an HDR mode has been enabled.
- - MaxLatency:
- type: int32_t
- draft: true
- description: |
- The maximum number of frames that can occur after a request (different
- than the previous) has been submitted, and before the result's state
- becomes synchronized. A value of -1 indicates unknown latency, and 0
- indicates per-frame control. Currently identical to
- ANDROID_SYNC_MAX_LATENCY.
+ \sa HdrMode
- - TestPatternMode:
- type: int32_t
- draft: true
- description: |
- Control to select the test pattern mode. Currently identical to
- ANDROID_SENSOR_TEST_PATTERN_MODE.
enum:
- - name: TestPatternModeOff
+ - name: HdrChannelNone
value: 0
description: |
- No test pattern mode is used. The camera device returns frames from
- the image sensor.
- - name: TestPatternModeSolidColor
+ This image does not correspond to any of the captures used to create
+ an HDR image.
+ - name: HdrChannelShort
value: 1
description: |
- Each pixel in [R, G_even, G_odd, B] is replaced by its respective
- color channel provided in test pattern data.
- \todo Add control for test pattern data.
- - name: TestPatternModeColorBars
+ This is a short exposure image.
+ - name: HdrChannelMedium
value: 2
description: |
- All pixel data is replaced with an 8-bar color pattern. The vertical
- bars (left-to-right) are as follows; white, yellow, cyan, green,
- magenta, red, blue and black. Each bar should take up 1/8 of the
- sensor pixel array width. When this is not possible, the bar size
- should be rounded down to the nearest integer and the pattern can
- repeat on the right side. Each bar's height must always take up the
- full sensor pixel array height.
- - name: TestPatternModeColorBarsFadeToGray
+ This is a medium exposure image.
+ - name: HdrChannelLong
value: 3
description: |
- The test pattern is similar to TestPatternModeColorBars,
- except that each bar should start at its specified color at the top
- and fade to gray at the bottom. Furthermore each bar is further
- subdevided into a left and right half. The left half should have a
- smooth gradient, and the right half should have a quantized
- gradient. In particular, the right half's should consist of blocks
- of the same color for 1/16th active sensor pixel array width. The
- least significant bits in the quantized gradient should be copied
- from the most significant bits of the smooth gradient. The height of
- each bar should always be a multiple of 128. When this is not the
- case, the pattern should repeat at the bottom of the image.
- - name: TestPatternModePn9
- value: 4
- description: |
- All pixel data is replaced by a pseudo-random sequence generated
- from a PN9 512-bit sequence (typically implemented in hardware with
- a linear feedback shift register). The generator should be reset at
- the beginning of each frame, and thus each subsequent raw frame with
- this test pattern should be exactly the same as the last.
- - name: TestPatternModeCustom1
- value: 256
- description: |
- The first custom test pattern. All custom patterns that are
- available only on this camera device are at least this numeric
- value. All of the custom test patterns will be static (that is the
- raw image must not vary from frame to frame).
+ This is a long exposure image.
...
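
As context for the flicker and HDR controls defined above, a hedged sketch of how an application might use them; control availability and accepted values depend entirely on the pipeline handler and IPA in use:

    #include <libcamera/control_ids.h>
    #include <libcamera/controls.h>
    #include <libcamera/request.h>

    using namespace libcamera;

    void queueSettings(Request *request)
    {
        ControlList &ctrls = request->controls();

        /* Cancel 50 Hz mains flicker explicitly (10000 us period). */
        ctrls.set(controls::AeFlickerMode, controls::FlickerManual);
        ctrls.set(controls::AeFlickerPeriod, 10000);

        /* Ask for merged multi-exposure HDR frames. */
        ctrls.set(controls::HdrMode, controls::HdrModeMultiExposure);
    }

    void inspectMetadata(Request *request)
    {
        /* Which HDR channel produced this frame, if HDR is enabled? */
        auto channel = request->metadata().get(controls::HdrChannel);
        if (channel && *channel == controls::HdrChannelShort) {
            /* Short exposure frame. */
        }
    }
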
diff --git a/src/libcamera/control_ids_draft.yaml b/src/libcamera/control_ids_draft.yaml
new file mode 100644
index 00000000..9bef5bf1
--- /dev/null
+++ b/src/libcamera/control_ids_draft.yaml
@@ -0,0 +1,230 @@
+# SPDX-License-Identifier: LGPL-2.1-or-later
+#
+# Copyright (C) 2019, Google Inc.
+#
+%YAML 1.1
+---
+# Unless otherwise stated, all controls are bi-directional, i.e. they can be
+# set through Request::controls() and returned out through Request::metadata().
+vendor: draft
+controls:
+ - AePrecaptureTrigger:
+ type: int32_t
+ description: |
+ Control for AE metering trigger. Currently identical to
+ ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER.
+
+ Whether the camera device will trigger a precapture metering sequence
+ when it processes this request.
+ enum:
+ - name: AePrecaptureTriggerIdle
+ value: 0
+ description: The trigger is idle.
+ - name: AePrecaptureTriggerStart
+ value: 1
+ description: The pre-capture AE metering is started by the camera.
+ - name: AePrecaptureTriggerCancel
+ value: 2
+ description: |
+ The camera will cancel any active or completed metering sequence.
+ The AE algorithm is reset to its initial state.
+
+ - NoiseReductionMode:
+ type: int32_t
+ description: |
+ Control to select the noise reduction algorithm mode. Currently
+ identical to ANDROID_NOISE_REDUCTION_MODE.
+
+ Mode of operation for the noise reduction algorithm.
+ enum:
+ - name: NoiseReductionModeOff
+ value: 0
+ description: No noise reduction is applied
+ - name: NoiseReductionModeFast
+ value: 1
+ description: |
+ Noise reduction is applied without reducing the frame rate.
+ - name: NoiseReductionModeHighQuality
+ value: 2
+ description: |
+ High quality noise reduction at the expense of frame rate.
+ - name: NoiseReductionModeMinimal
+ value: 3
+ description: |
+ Minimal noise reduction is applied without reducing the frame rate.
+ - name: NoiseReductionModeZSL
+ value: 4
+ description: |
+ Noise reduction is applied at different levels to different streams.
+
+ - ColorCorrectionAberrationMode:
+ type: int32_t
+ description: |
+ Control to select the color correction aberration mode. Currently
+ identical to ANDROID_COLOR_CORRECTION_ABERRATION_MODE.
+
+ Mode of operation for the chromatic aberration correction algorithm.
+ enum:
+ - name: ColorCorrectionAberrationOff
+ value: 0
+ description: No aberration correction is applied.
+ - name: ColorCorrectionAberrationFast
+ value: 1
+ description: Aberration correction will not slow down the frame rate.
+ - name: ColorCorrectionAberrationHighQuality
+ value: 2
+ description: |
+ High quality aberration correction which might reduce the frame
+ rate.
+
+ - AeState:
+ type: int32_t
+ description: |
+ Control to report the current AE algorithm state. Currently identical to
+ ANDROID_CONTROL_AE_STATE.
+
+ Current state of the AE algorithm.
+ enum:
+ - name: AeStateInactive
+ value: 0
+ description: The AE algorithm is inactive.
+ - name: AeStateSearching
+ value: 1
+ description: The AE algorithm has not converged yet.
+ - name: AeStateConverged
+ value: 2
+ description: The AE algorithm has converged.
+ - name: AeStateLocked
+ value: 3
+ description: The AE algorithm is locked.
+ - name: AeStateFlashRequired
+ value: 4
+ description: The AE algorithm would need a flash for good results
+ - name: AeStatePrecapture
+ value: 5
+ description: |
+ The AE algorithm has started a pre-capture metering session.
+ \sa AePrecaptureTrigger
+
+ - AwbState:
+ type: int32_t
+ description: |
+ Control to report the current AWB algorithm state. Currently identical
+ to ANDROID_CONTROL_AWB_STATE.
+
+ Current state of the AWB algorithm.
+ enum:
+ - name: AwbStateInactive
+ value: 0
+ description: The AWB algorithm is inactive.
+ - name: AwbStateSearching
+ value: 1
+ description: The AWB algorithm has not converged yet.
+ - name: AwbConverged
+ value: 2
+ description: The AWB algorithm has converged.
+ - name: AwbLocked
+ value: 3
+ description: The AWB algorithm is locked.
+
+ - SensorRollingShutterSkew:
+ type: int64_t
+ description: |
+ Control to report the time between the start of exposure of the first
+ row and the start of exposure of the last row. Currently identical to
+ ANDROID_SENSOR_ROLLING_SHUTTER_SKEW
+
+ - LensShadingMapMode:
+ type: int32_t
+ description: |
+ Control to report if the lens shading map is available. Currently
+ identical to ANDROID_STATISTICS_LENS_SHADING_MAP_MODE.
+ enum:
+ - name: LensShadingMapModeOff
+ value: 0
+ description: No lens shading map mode is available.
+ - name: LensShadingMapModeOn
+ value: 1
+ description: The lens shading map mode is available.
+
+ - PipelineDepth:
+ type: int32_t
+ description: |
+ Specifies the number of pipeline stages the frame went through from when
+ it was exposed to when the final completed result was available to the
+ framework. Always less than or equal to PipelineMaxDepth. Currently
+ identical to ANDROID_REQUEST_PIPELINE_DEPTH.
+
+ The typical value for this control is 3 as a frame is first exposed,
+ captured and then processed in a single pass through the ISP. Any
+ additional processing step performed after the ISP pass (for example face
+ detection, additional format conversions etc.) counts as an additional
+ pipeline stage.
+
+ - MaxLatency:
+ type: int32_t
+ description: |
+ The maximum number of frames that can occur after a request (different
+ than the previous) has been submitted, and before the result's state
+ becomes synchronized. A value of -1 indicates unknown latency, and 0
+ indicates per-frame control. Currently identical to
+ ANDROID_SYNC_MAX_LATENCY.
+
+ - TestPatternMode:
+ type: int32_t
+ description: |
+ Control to select the test pattern mode. Currently identical to
+ ANDROID_SENSOR_TEST_PATTERN_MODE.
+ enum:
+ - name: TestPatternModeOff
+ value: 0
+ description: |
+ No test pattern mode is used. The camera device returns frames from
+ the image sensor.
+ - name: TestPatternModeSolidColor
+ value: 1
+ description: |
+ Each pixel in [R, G_even, G_odd, B] is replaced by its respective
+ color channel provided in test pattern data.
+ \todo Add control for test pattern data.
+ - name: TestPatternModeColorBars
+ value: 2
+ description: |
+ All pixel data is replaced with an 8-bar color pattern. The vertical
+ bars (left-to-right) are as follows; white, yellow, cyan, green,
+ magenta, red, blue and black. Each bar should take up 1/8 of the
+ sensor pixel array width. When this is not possible, the bar size
+ should be rounded down to the nearest integer and the pattern can
+ repeat on the right side. Each bar's height must always take up the
+ full sensor pixel array height.
+ - name: TestPatternModeColorBarsFadeToGray
+ value: 3
+ description: |
+ The test pattern is similar to TestPatternModeColorBars,
+ except that each bar should start at its specified color at the top
+ and fade to gray at the bottom. Furthermore each bar is further
+ subdivided into a left and right half. The left half should have a
+ smooth gradient, and the right half should have a quantized
+ gradient. In particular, the right half should consist of blocks
+ of the same color for 1/16th active sensor pixel array width. The
+ least significant bits in the quantized gradient should be copied
+ from the most significant bits of the smooth gradient. The height of
+ each bar should always be a multiple of 128. When this is not the
+ case, the pattern should repeat at the bottom of the image.
+ - name: TestPatternModePn9
+ value: 4
+ description: |
+ All pixel data is replaced by a pseudo-random sequence generated
+ from a PN9 512-bit sequence (typically implemented in hardware with
+ a linear feedback shift register). The generator should be reset at
+ the beginning of each frame, and thus each subsequent raw frame with
+ this test pattern should be exactly the same as the last.
+ - name: TestPatternModeCustom1
+ value: 256
+ description: |
+ The first custom test pattern. All custom patterns that are
+ available only on this camera device are at least this numeric
+ value. All of the custom test patterns will be static (that is the
+ raw image must not vary from frame to frame).
+
+...
diff --git a/src/libcamera/control_ids_rpi.yaml b/src/libcamera/control_ids_rpi.yaml
new file mode 100644
index 00000000..cb097f88
--- /dev/null
+++ b/src/libcamera/control_ids_rpi.yaml
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: LGPL-2.1-or-later
+#
+# Copyright (C) 2023, Raspberry Pi Ltd
+#
+%YAML 1.1
+---
+# Raspberry Pi (VC4 and PiSP) specific vendor controls
+vendor: rpi
+controls:
+ - StatsOutputEnable:
+ type: bool
+ description: |
+ Toggles the Raspberry Pi IPA to output a binary dump of the hardware
+ generated statistics through the Request metadata in the Bcm2835StatsOutput
+ control.
+
+ \sa Bcm2835StatsOutput
+
+ - Bcm2835StatsOutput:
+ type: uint8_t
+ size: [n]
+ description: |
+ Span of the BCM2835 ISP generated statistics for the current frame. This
+ is sent in the Request metadata if the StatsOutputEnable is set to true.
+ The statistics struct definition can be found in include/linux/bcm2835-isp.h.
+
+ \sa StatsOutputEnable
+
+...
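
A hedged sketch of how these vendor controls surface to applications through the generated controls::rpi namespace, assuming a Raspberry Pi pipeline that implements them:

    #include <libcamera/control_ids.h>
    #include <libcamera/controls.h>
    #include <libcamera/request.h>

    using namespace libcamera;

    void enableStatsDump(Request *request)
    {
        /* Ask the Raspberry Pi IPA to attach the raw ISP statistics. */
        request->controls().set(controls::rpi::StatsOutputEnable, true);
    }

    bool haveStatsDump(Request *request)
    {
        /* The dump is returned as a byte span in the completed metadata. */
        auto stats = request->metadata().get(controls::rpi::Bcm2835StatsOutput);
        return stats && !stats->empty();
    }
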
diff --git a/src/libcamera/control_ranges.yaml b/src/libcamera/control_ranges.yaml
new file mode 100644
index 00000000..d42447d0
--- /dev/null
+++ b/src/libcamera/control_ranges.yaml
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: LGPL-2.1-or-later
+#
+# Copyright (C) 2023, Raspberry Pi Ltd
+#
+%YAML 1.1
+---
+# Specifies the control id ranges/offsets for core/draft libcamera and vendor
+# controls and properties.
+ranges:
+ # Core libcamera controls
+ libcamera: 0
+ # Draft designated libcamera controls
+ draft: 10000
+ # Raspberry Pi vendor controls
+ rpi: 20000
+ # Next range starts at 30000
+
+...
diff --git a/src/libcamera/control_serializer.cpp b/src/libcamera/control_serializer.cpp
index e87d2362..52fd714f 100644
--- a/src/libcamera/control_serializer.cpp
+++ b/src/libcamera/control_serializer.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * control_serializer.cpp - Control (de)serializer
+ * Control (de)serializer
*/
#include "libcamera/internal/control_serializer.h"
@@ -144,7 +144,7 @@ void ControlSerializer::reset()
size_t ControlSerializer::binarySize(const ControlValue &value)
{
- return value.data().size_bytes();
+ return sizeof(ControlType) + value.data().size_bytes();
}
size_t ControlSerializer::binarySize(const ControlInfo &info)
@@ -195,6 +195,8 @@ size_t ControlSerializer::binarySize(const ControlList &list)
void ControlSerializer::store(const ControlValue &value,
ByteStreamBuffer &buffer)
{
+ const ControlType type = value.type();
+ buffer.write(&type);
buffer.write(value.data());
}
@@ -379,11 +381,13 @@ int ControlSerializer::serialize(const ControlList &list,
return 0;
}
-ControlValue ControlSerializer::loadControlValue(ControlType type,
- ByteStreamBuffer &buffer,
+ControlValue ControlSerializer::loadControlValue(ByteStreamBuffer &buffer,
bool isArray,
unsigned int count)
{
+ ControlType type;
+ buffer.read(&type);
+
ControlValue value;
value.reserve(type, isArray, count);
@@ -392,15 +396,11 @@ ControlValue ControlSerializer::loadControlValue(ControlType type,
return value;
}
-ControlInfo ControlSerializer::loadControlInfo(ControlType type,
- ByteStreamBuffer &b)
+ControlInfo ControlSerializer::loadControlInfo(ByteStreamBuffer &b)
{
- if (type == ControlTypeString)
- type = ControlTypeInteger32;
-
- ControlValue min = loadControlValue(type, b);
- ControlValue max = loadControlValue(type, b);
- ControlValue def = loadControlValue(type, b);
+ ControlValue min = loadControlValue(b);
+ ControlValue max = loadControlValue(b);
+ ControlValue def = loadControlValue(b);
return ControlInfo(min, max, def);
}
@@ -513,7 +513,7 @@ ControlInfoMap ControlSerializer::deserialize<ControlInfoMap>(ByteStreamBuffer &
}
/* Create and store the ControlInfo. */
- ctrls.emplace(controlId, loadControlInfo(type, values));
+ ctrls.emplace(controlId, loadControlInfo(values));
}
/*
@@ -624,10 +624,8 @@ ControlList ControlSerializer::deserialize<ControlList>(ByteStreamBuffer &buffer
return {};
}
- ControlType type = static_cast<ControlType>(entry->type);
ctrls.set(entry->id,
- loadControlValue(type, values, entry->is_array,
- entry->count));
+ loadControlValue(values, entry->is_array, entry->count));
}
return ctrls;
diff --git a/src/libcamera/control_validator.cpp b/src/libcamera/control_validator.cpp
index cf08b34a..93982cff 100644
--- a/src/libcamera/control_validator.cpp
+++ b/src/libcamera/control_validator.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * control_validator.cpp - Control validator
+ * Control validator
*/
#include "libcamera/internal/control_validator.h"
diff --git a/src/libcamera/controls.cpp b/src/libcamera/controls.cpp
index 03ac6345..11d35321 100644
--- a/src/libcamera/controls.cpp
+++ b/src/libcamera/controls.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * controls.cpp - Control handling
+ * Control handling
*/
#include <libcamera/controls.h>
@@ -677,6 +677,9 @@ ControlInfoMap::ControlInfoMap(Map &&info, const ControlIdMap &idmap)
bool ControlInfoMap::validate()
{
+ if (!idmap_)
+ return false;
+
for (const auto &ctrl : *this) {
const ControlId *id = ctrl.first;
auto it = idmap_->find(id->id());
@@ -719,6 +722,8 @@ bool ControlInfoMap::validate()
*/
ControlInfoMap::mapped_type &ControlInfoMap::at(unsigned int id)
{
+ ASSERT(idmap_);
+
return at(idmap_->at(id));
}
@@ -729,6 +734,8 @@ ControlInfoMap::mapped_type &ControlInfoMap::at(unsigned int id)
*/
const ControlInfoMap::mapped_type &ControlInfoMap::at(unsigned int id) const
{
+ ASSERT(idmap_);
+
return at(idmap_->at(id));
}
@@ -739,6 +746,9 @@ const ControlInfoMap::mapped_type &ControlInfoMap::at(unsigned int id) const
*/
ControlInfoMap::size_type ControlInfoMap::count(unsigned int id) const
{
+ if (!idmap_)
+ return 0;
+
/*
* The ControlInfoMap and its idmap have a 1:1 mapping between their
* entries, we can thus just count the matching entries in idmap to
@@ -755,6 +765,9 @@ ControlInfoMap::size_type ControlInfoMap::count(unsigned int id) const
*/
ControlInfoMap::iterator ControlInfoMap::find(unsigned int id)
{
+ if (!idmap_)
+ return end();
+
auto iter = idmap_->find(id);
if (iter == idmap_->end())
return end();
@@ -770,6 +783,9 @@ ControlInfoMap::iterator ControlInfoMap::find(unsigned int id)
*/
ControlInfoMap::const_iterator ControlInfoMap::find(unsigned int id) const
{
+ if (!idmap_)
+ return end();
+
auto iter = idmap_->find(id);
if (iter == idmap_->end())
return end();
@@ -892,12 +908,26 @@ ControlList::ControlList(const ControlInfoMap &infoMap,
*/
/**
+ * \enum ControlList::MergePolicy
+ * \brief The policy used by the merge function
+ *
+ * \var ControlList::MergePolicy::KeepExisting
+ * \brief Existing controls in the target list are kept
+ *
+ * \var ControlList::MergePolicy::OverwriteExisting
+ * \brief Existing controls in the target list are updated
+ */
+
+/**
* \brief Merge the \a source into the ControlList
* \param[in] source The ControlList to merge into this object
+ * \param[in] policy Controls if existing elements in *this shall be
+ * overwritten
*
* Merging two control lists copies elements from the \a source and inserts
* them in *this. If the \a source contains elements whose key is already
- * present in *this, then those elements are not overwritten.
+ * present in *this, then those elements are only overwritten if
+ * \a policy is MergePolicy::OverwriteExisting.
*
* Only control lists created from the same ControlIdMap or ControlInfoMap may
* be merged. Attempting to do otherwise results in undefined behaviour.
@@ -905,10 +935,10 @@ ControlList::ControlList(const ControlInfoMap &infoMap,
* \todo Reimplement or implement an overloaded version which internally uses
* std::unordered_map::merge() and accepts a non-const argument.
*/
-void ControlList::merge(const ControlList &source)
+void ControlList::merge(const ControlList &source, MergePolicy policy)
{
/**
- * \todo: ASSERT that the current and source ControlList are derived
+ * \todo ASSERT that the current and source ControlList are derived
* from a compatible ControlIdMap, to prevent undefined behaviour due to
* id collisions.
*
@@ -920,7 +950,7 @@ void ControlList::merge(const ControlList &source)
*/
for (const auto &ctrl : source) {
- if (contains(ctrl.first)) {
+ if (policy == MergePolicy::KeepExisting && contains(ctrl.first)) {
const ControlId *id = idmap_->at(ctrl.first);
LOG(Controls, Warning)
<< "Control " << id->name() << " not overwritten";
@@ -933,17 +963,6 @@ void ControlList::merge(const ControlList &source)
/**
* \brief Check if the list contains a control with the specified \a id
- * \param[in] id The control ID
- *
- * \return True if the list contains a matching control, false otherwise
- */
-bool ControlList::contains(const ControlId &id) const
-{
- return controls_.find(id.id()) != controls_.end();
-}
-
-/**
- * \brief Check if the list contains a control with the specified \a id
* \param[in] id The control numerical ID
*
* \return True if the list contains a matching control, false otherwise
@@ -954,22 +973,20 @@ bool ControlList::contains(unsigned int id) const
}
/**
- * \fn template<typename T> T ControlList::get(const Control<T> &ctrl) const
+ * \fn ControlList::get(const Control<T> &ctrl) const
* \brief Get the value of control \a ctrl
* \param[in] ctrl The control
*
- * The behaviour is undefined if the control \a ctrl is not present in the
- * list. Use ControlList::contains() to test for the presence of a control in
- * the list before retrieving its value.
- *
- * The control value type shall match the type T, otherwise the behaviour is
- * undefined.
+ * Besides getting the value of a control, this function can also be used to
+ * check if a control is present in the ControlList by converting the returned
+ * std::optional<T> to bool (or calling its has_value() function).
*
- * \return The control value
+ * \return A std::optional<T> containing the control value, or std::nullopt if
+ * the control \a ctrl is not present in the list
*/
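
A minimal sketch of the optional-based accessor, assuming completed request metadata from a pipeline that reports FocusFoM:

    #include <iostream>

    #include <libcamera/control_ids.h>
    #include <libcamera/request.h>

    using namespace libcamera;

    void checkFocus(Request *request)
    {
        /* get() now doubles as a presence check via std::optional. */
        const auto fom = request->metadata().get(controls::FocusFoM);
        if (fom)
            std::cout << "Focus figure of merit: " << *fom << std::endl;
    }
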
/**
- * \fn template<typename T, typename V> void ControlList::set(const Control<T> &ctrl, const V &value)
+ * \fn ControlList::set(const Control<T> &ctrl, const V &value)
* \brief Set the control \a ctrl value to \a value
* \param[in] ctrl The control
* \param[in] value The control value
@@ -983,8 +1000,7 @@ bool ControlList::contains(unsigned int id) const
*/
/**
- * \fn template<typename T, typename V> \
- * void ControlList::set(const Control<T> &ctrl, const std::initializer_list<V> &value)
+ * \fn ControlList::set(const Control<Span<T, Size>> &ctrl, const std::initializer_list<V> &value)
* \copydoc ControlList::set(const Control<T> &ctrl, const V &value)
*/
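
A short sketch of the std::optional-based accessor described above, using the
ExposureTime control for illustration:

#include <optional>

#include <libcamera/control_ids.h>
#include <libcamera/controls.h>

using namespace libcamera;

void readExposure(const ControlList &metadata)
{
	/* get() no longer has undefined behaviour for missing controls. */
	const std::optional<int32_t> exposure =
		metadata.get(controls::ExposureTime);
	if (!exposure)
		return;

	int32_t us = *exposure; /* exposure time in microseconds */
	(void)us;
}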
diff --git a/src/libcamera/converter.cpp b/src/libcamera/converter.cpp
new file mode 100644
index 00000000..d3d38c1b
--- /dev/null
+++ b/src/libcamera/converter.cpp
@@ -0,0 +1,335 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright 2022 NXP
+ *
+ * Generic format converter interface
+ */
+
+#include "libcamera/internal/converter.h"
+
+#include <algorithm>
+
+#include <libcamera/base/log.h>
+
+#include "libcamera/internal/media_device.h"
+
+/**
+ * \file internal/converter.h
+ * \brief Abstract converter
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(Converter)
+
+/**
+ * \class Converter
+ * \brief Abstract Base Class for converter
+ *
+ * The Converter class is an Abstract Base Class defining the interfaces of
+ * converter implementations.
+ *
+ * Converters offer scaling and pixel format conversion services on an input
+ * stream. The converter can output multiple streams with individual conversion
+ * parameters from the same input stream.
+ */
+
+/**
+ * \brief Construct a Converter instance
+ * \param[in] media The media device implementing the converter
+ *
+ * This searches the media graph entities for the entity implementing the data
+ * streaming function and uses its device node as the converter device node.
+ */
+Converter::Converter(MediaDevice *media)
+{
+ const std::vector<MediaEntity *> &entities = media->entities();
+ auto it = std::find_if(entities.begin(), entities.end(),
+ [](MediaEntity *entity) {
+ return entity->function() == MEDIA_ENT_F_IO_V4L;
+ });
+ if (it == entities.end()) {
+ LOG(Converter, Error)
+ << "No entity suitable for implementing a converter in "
+ << media->driver() << " entities list.";
+ return;
+ }
+
+ deviceNode_ = (*it)->deviceNode();
+}
+
+Converter::~Converter()
+{
+}
+
+/**
+ * \fn Converter::loadConfiguration()
+ * \brief Load converter configuration from file
+ * \param[in] filename The file name path
+ *
+ * Load converter dependent configuration parameters to apply on the hardware.
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+
+/**
+ * \fn Converter::isValid()
+ * \brief Check if the converter configuration is valid
+ * \return True if the converter is valid, false otherwise
+ */
+
+/**
+ * \fn Converter::formats()
+ * \brief Retrieve the list of supported pixel formats for an input pixel format
+ * \param[in] input Input pixel format to retrieve output pixel format list for
+ * \return The list of supported output pixel formats
+ */
+
+/**
+ * \fn Converter::sizes()
+ * \brief Retrieve the range of minimum and maximum output sizes for an input size
+ * \param[in] input Input stream size to retrieve range for
+ * \return A range of output image sizes
+ */
+
+/**
+ * \fn Converter::strideAndFrameSize()
+ * \brief Retrieve the output stride and frame size for an input configuration
+ * \param[in] pixelFormat Input stream pixel format
+ * \param[in] size Input stream size
+ * \return A tuple indicating the stride and frame size or an empty tuple on error
+ */
+
+/**
+ * \fn Converter::configure()
+ * \brief Configure a set of output stream conversion from an input stream
+ * \param[in] inputCfg Input stream configuration
+ * \param[out] outputCfgs A list of output stream configurations
+ * \return 0 on success or a negative error code otherwise
+ */
+
+/**
+ * \fn Converter::exportBuffers()
+ * \brief Export buffers from the converter device
+ * \param[in] output Output stream index exporting the buffers
+ * \param[in] count Number of buffers to allocate
+ * \param[out] buffers Vector to store allocated buffers
+ *
+ * This function operates similarly to V4L2VideoDevice::exportBuffers() on the
+ * output stream indicated by the \a output index.
+ *
+ * \return The number of allocated buffers on success or a negative error code
+ * otherwise
+ */
+
+/**
+ * \fn Converter::start()
+ * \brief Start the converter streaming operation
+ * \return 0 on success or a negative error code otherwise
+ */
+
+/**
+ * \fn Converter::stop()
+ * \brief Stop the converter streaming operation
+ */
+
+/**
+ * \fn Converter::queueBuffers()
+ * \brief Queue buffers to converter device
+ * \param[in] input The frame buffer to apply the conversion to
+ * \param[out] outputs The container holding the output stream indexes and
+ * their respective output frame buffers
+ *
+ * This function queues the \a input frame buffer for conversion on the output
+ * streams identified by the \a outputs map keys, and captures the converted
+ * frames into the frame buffers given as the corresponding map values.
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+
+/**
+ * \var Converter::inputBufferReady
+ * \brief A signal emitted when the input frame buffer completes
+ */
+
+/**
+ * \var Converter::outputBufferReady
+ * \brief A signal emitted on each frame buffer completion of the output queue
+ */
+
+/**
+ * \fn Converter::deviceNode()
+ * \brief The converter device node attribute accessor
+ * \return The converter device node string
+ */
+
+/**
+ * \class ConverterFactoryBase
+ * \brief Base class for converter factories
+ *
+ * The ConverterFactoryBase class is the base of all specializations of the
+ * ConverterFactory class template. It implements the factory registration,
+ * maintains a registry of factories, and provides access to the registered
+ * factories.
+ */
+
+/**
+ * \brief Construct a converter factory base
+ * \param[in] name Name of the converter class
+ * \param[in] compatibles Name aliases of the converter class
+ *
+ * Creating an instance of the factory base registers it with the global list of
+ * factories, accessible through the factories() function.
+ *
+ * The factory \a name is used as a unique identifier. If the converter
+ * implementation fully relies on a generic framework, the name should be the
+ * same as the framework. Otherwise, if the implementation is specialized, the
+ * factory name should match the driver name implementing the function.
+ *
+ * The factory \a compatibles holds a list of driver names implementing a generic
+ * subsystem without any customization.
+ */
+ConverterFactoryBase::ConverterFactoryBase(const std::string name, std::initializer_list<std::string> compatibles)
+ : name_(name), compatibles_(compatibles)
+{
+ registerType(this);
+}
+
+/**
+ * \fn ConverterFactoryBase::compatibles()
+ * \return The list of compatible name aliases of the converter
+ */
+
+/**
+ * \brief Create an instance of the converter corresponding to the media device
+ * \param[in] media The media device to create the converter for
+ *
+ * The converter is created by matching the factory name or any of its
+ * compatible aliases with the media device driver name.
+ *
+ * \return A new instance of the converter subclass corresponding to the media
+ * device, or null if the media device driver name doesn't match anything
+ */
+std::unique_ptr<Converter> ConverterFactoryBase::create(MediaDevice *media)
+{
+ const std::vector<ConverterFactoryBase *> &factories =
+ ConverterFactoryBase::factories();
+
+ for (const ConverterFactoryBase *factory : factories) {
+ const std::vector<std::string> &compatibles = factory->compatibles();
+ auto it = std::find(compatibles.begin(), compatibles.end(), media->driver());
+
+ if (it == compatibles.end() && media->driver() != factory->name_)
+ continue;
+
+ LOG(Converter, Debug)
+ << "Creating converter from "
+ << factory->name_ << " factory with "
+ << (it == compatibles.end() ? "no" : media->driver()) << " alias.";
+
+ std::unique_ptr<Converter> converter = factory->createInstance(media);
+ if (converter->isValid())
+ return converter;
+ }
+
+ return nullptr;
+}
+
+/**
+ * \brief Add a converter factory to the registry
+ * \param[in] factory Factory to use to construct the converter class
+ *
+ * The caller is responsible for guaranteeing the uniqueness of the converter
+ * factory name.
+ */
+void ConverterFactoryBase::registerType(ConverterFactoryBase *factory)
+{
+ std::vector<ConverterFactoryBase *> &factories =
+ ConverterFactoryBase::factories();
+
+ factories.push_back(factory);
+}
+
+/**
+ * \brief Retrieve the list of all converter factory names
+ * \return The list of all converter factory names
+ */
+std::vector<std::string> ConverterFactoryBase::names()
+{
+ std::vector<std::string> list;
+
+ std::vector<ConverterFactoryBase *> &factories =
+ ConverterFactoryBase::factories();
+
+ for (ConverterFactoryBase *factory : factories) {
+ list.push_back(factory->name_);
+ for (auto alias : factory->compatibles())
+ list.push_back(alias);
+ }
+
+ return list;
+}
+
+/**
+ * \brief Retrieve the list of all converter factories
+ * \return The list of converter factories
+ */
+std::vector<ConverterFactoryBase *> &ConverterFactoryBase::factories()
+{
+ /*
+ * The static factories map is defined inside the function to ensure
+ * it gets initialized on first use, without any dependency on link
+ * order.
+ */
+ static std::vector<ConverterFactoryBase *> factories;
+ return factories;
+}
+
+/**
+ * \var ConverterFactoryBase::name_
+ * \brief The name of the factory
+ */
+
+/**
+ * \var ConverterFactoryBase::compatibles_
+ * \brief The list holding the factory compatibles
+ */
+
+/**
+ * \class ConverterFactory
+ * \brief Registration of ConverterFactory classes and creation of instances
+ * \param _Converter The converter class type for this factory
+ *
+ * To facilitate discovery and instantiation of Converter classes, the
+ * ConverterFactory class implements auto-registration of converter helpers.
+ * Each Converter subclass shall register itself using the REGISTER_CONVERTER()
+ * macro, which will create a corresponding instance of a ConverterFactory
+ * subclass and register it with the static list of factories.
+ */
+
+/**
+ * \fn ConverterFactory::ConverterFactory(const char *name, std::initializer_list<std::string> compatibles)
+ * \brief Construct a converter factory
+ * \details \copydetails ConverterFactoryBase::ConverterFactoryBase
+ */
+
+/**
+ * \fn ConverterFactory::createInstance() const
+ * \brief Create an instance of the Converter corresponding to the factory
+ * \param[in] media Media device pointer
+ * \return A unique pointer to a newly constructed instance of the Converter
+ * subclass corresponding to the factory
+ */
+
+/**
+ * \def REGISTER_CONVERTER
+ * \brief Register a converter with the Converter factory
+ * \param[in] name Converter name used to register the class
+ * \param[in] converter Class name of Converter derived class to register
+ * \param[in] compatibles List of compatible names
+ *
+ * Register a Converter subclass with the factory and make it available when
+ * trying to match and instantiate converters for a media device.
+ */
+
+} /* namespace libcamera */
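
A sketch of how a pipeline handler might create and use a converter through
the factory interface described above; the stream indexes and buffers are
illustrative:

#include <map>
#include <memory>

#include <libcamera/framebuffer.h>

#include "libcamera/internal/converter.h"
#include "libcamera/internal/media_device.h"

using namespace libcamera;

std::unique_ptr<Converter> makeConverter(MediaDevice *media)
{
	/* Matches the factory name or one of its compatible aliases. */
	return ConverterFactoryBase::create(media);
}

int convertFrame(Converter *converter, FrameBuffer *input,
		 FrameBuffer *out0, FrameBuffer *out1)
{
	/* Queue one input frame on two previously configured output streams. */
	return converter->queueBuffers(input, { { 0, out0 }, { 1, out1 } });
}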
diff --git a/src/libcamera/pipeline/simple/converter.cpp b/src/libcamera/converter/converter_v4l2_m2m.cpp
index 77c44fc8..d8929fc5 100644
--- a/src/libcamera/pipeline/simple/converter.cpp
+++ b/src/libcamera/converter/converter_v4l2_m2m.cpp
@@ -1,11 +1,12 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2020, Laurent Pinchart
+ * Copyright 2022 NXP
*
- * converter.cpp - Format converter for simple pipeline handler
+ * V4L2 M2M Format converter
*/
-#include "converter.h"
+#include "libcamera/internal/converter/converter_v4l2_m2m.h"
#include <algorithm>
#include <limits.h>
@@ -21,18 +22,23 @@
#include "libcamera/internal/media_device.h"
#include "libcamera/internal/v4l2_videodevice.h"
+/**
+ * \file internal/converter/converter_v4l2_m2m.h
+ * \brief V4L2 M2M based converter
+ */
+
namespace libcamera {
-LOG_DECLARE_CATEGORY(SimplePipeline)
+LOG_DECLARE_CATEGORY(Converter)
/* -----------------------------------------------------------------------------
- * SimpleConverter::Stream
+ * V4L2M2MConverter::Stream
*/
-SimpleConverter::Stream::Stream(SimpleConverter *converter, unsigned int index)
+V4L2M2MConverter::Stream::Stream(V4L2M2MConverter *converter, unsigned int index)
: converter_(converter), index_(index)
{
- m2m_ = std::make_unique<V4L2M2MDevice>(converter->deviceNode_);
+ m2m_ = std::make_unique<V4L2M2MDevice>(converter->deviceNode());
m2m_->output()->bufferReady.connect(this, &Stream::outputBufferReady);
m2m_->capture()->bufferReady.connect(this, &Stream::captureBufferReady);
@@ -42,11 +48,11 @@ SimpleConverter::Stream::Stream(SimpleConverter *converter, unsigned int index)
m2m_.reset();
}
-int SimpleConverter::Stream::configure(const StreamConfiguration &inputCfg,
- const StreamConfiguration &outputCfg)
+int V4L2M2MConverter::Stream::configure(const StreamConfiguration &inputCfg,
+ const StreamConfiguration &outputCfg)
{
V4L2PixelFormat videoFormat =
- V4L2PixelFormat::fromPixelFormat(inputCfg.pixelFormat);
+ m2m_->output()->toV4L2PixelFormat(inputCfg.pixelFormat);
V4L2DeviceFormat format;
format.fourcc = videoFormat;
@@ -56,14 +62,14 @@ int SimpleConverter::Stream::configure(const StreamConfiguration &inputCfg,
int ret = m2m_->output()->setFormat(&format);
if (ret < 0) {
- LOG(SimplePipeline, Error)
+ LOG(Converter, Error)
<< "Failed to set input format: " << strerror(-ret);
return ret;
}
if (format.fourcc != videoFormat || format.size != inputCfg.size ||
format.planes[0].bpl != inputCfg.stride) {
- LOG(SimplePipeline, Error)
+ LOG(Converter, Error)
<< "Input format not supported (requested "
<< inputCfg.size << "-" << videoFormat
<< ", got " << format << ")";
@@ -71,20 +77,20 @@ int SimpleConverter::Stream::configure(const StreamConfiguration &inputCfg,
}
/* Set the pixel format and size on the output. */
- videoFormat = V4L2PixelFormat::fromPixelFormat(outputCfg.pixelFormat);
+ videoFormat = m2m_->capture()->toV4L2PixelFormat(outputCfg.pixelFormat);
format = {};
format.fourcc = videoFormat;
format.size = outputCfg.size;
ret = m2m_->capture()->setFormat(&format);
if (ret < 0) {
- LOG(SimplePipeline, Error)
+ LOG(Converter, Error)
<< "Failed to set output format: " << strerror(-ret);
return ret;
}
if (format.fourcc != videoFormat || format.size != outputCfg.size) {
- LOG(SimplePipeline, Error)
+ LOG(Converter, Error)
<< "Output format not supported";
return -EINVAL;
}
@@ -95,13 +101,13 @@ int SimpleConverter::Stream::configure(const StreamConfiguration &inputCfg,
return 0;
}
-int SimpleConverter::Stream::exportBuffers(unsigned int count,
- std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+int V4L2M2MConverter::Stream::exportBuffers(unsigned int count,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
{
return m2m_->capture()->exportBuffers(count, buffers);
}
-int SimpleConverter::Stream::start()
+int V4L2M2MConverter::Stream::start()
{
int ret = m2m_->output()->importBuffers(inputBufferCount_);
if (ret < 0)
@@ -128,7 +134,7 @@ int SimpleConverter::Stream::start()
return 0;
}
-void SimpleConverter::Stream::stop()
+void V4L2M2MConverter::Stream::stop()
{
m2m_->capture()->streamOff();
m2m_->output()->streamOff();
@@ -136,8 +142,7 @@ void SimpleConverter::Stream::stop()
m2m_->output()->releaseBuffers();
}
-int SimpleConverter::Stream::queueBuffers(FrameBuffer *input,
- FrameBuffer *output)
+int V4L2M2MConverter::Stream::queueBuffers(FrameBuffer *input, FrameBuffer *output)
{
int ret = m2m_->output()->queueBuffer(input);
if (ret < 0)
@@ -150,12 +155,12 @@ int SimpleConverter::Stream::queueBuffers(FrameBuffer *input,
return 0;
}
-std::string SimpleConverter::Stream::logPrefix() const
+std::string V4L2M2MConverter::Stream::logPrefix() const
{
return "stream" + std::to_string(index_);
}
-void SimpleConverter::Stream::outputBufferReady(FrameBuffer *buffer)
+void V4L2M2MConverter::Stream::outputBufferReady(FrameBuffer *buffer)
{
auto it = converter_->queue_.find(buffer);
if (it == converter_->queue_.end())
@@ -167,32 +172,34 @@ void SimpleConverter::Stream::outputBufferReady(FrameBuffer *buffer)
}
}
-void SimpleConverter::Stream::captureBufferReady(FrameBuffer *buffer)
+void V4L2M2MConverter::Stream::captureBufferReady(FrameBuffer *buffer)
{
converter_->outputBufferReady.emit(buffer);
}
/* -----------------------------------------------------------------------------
- * SimpleConverter
+ * V4L2M2MConverter
*/
-SimpleConverter::SimpleConverter(MediaDevice *media)
+/**
+ * \class libcamera::V4L2M2MConverter
+ * \brief The V4L2 M2M converter implements the converter interface based on
+ * a V4L2 M2M device.
+ */
+
+/**
+ * \fn V4L2M2MConverter::V4L2M2MConverter
+ * \brief Construct a V4L2M2MConverter instance
+ * \param[in] media The media device implementing the converter
+ */
+
+V4L2M2MConverter::V4L2M2MConverter(MediaDevice *media)
+ : Converter(media)
{
- /*
- * Locate the video node. There's no need to validate the pipeline
- * further, the caller guarantees that this is a V4L2 mem2mem device.
- */
- const std::vector<MediaEntity *> &entities = media->entities();
- auto it = std::find_if(entities.begin(), entities.end(),
- [](MediaEntity *entity) {
- return entity->function() == MEDIA_ENT_F_IO_V4L;
- });
- if (it == entities.end())
+ if (deviceNode().empty())
return;
- deviceNode_ = (*it)->deviceNode();
-
- m2m_ = std::make_unique<V4L2M2MDevice>(deviceNode_);
+ m2m_ = std::make_unique<V4L2M2MDevice>(deviceNode());
int ret = m2m_->open();
if (ret < 0) {
m2m_.reset();
@@ -200,7 +207,21 @@ SimpleConverter::SimpleConverter(MediaDevice *media)
}
}
-std::vector<PixelFormat> SimpleConverter::formats(PixelFormat input)
+/**
+ * \fn libcamera::V4L2M2MConverter::loadConfiguration
+ * \details \copydetails libcamera::Converter::loadConfiguration
+ */
+
+/**
+ * \fn libcamera::V4L2M2MConverter::isValid
+ * \details \copydetails libcamera::Converter::isValid
+ */
+
+/**
+ * \fn libcamera::V4L2M2MConverter::formats
+ * \details \copydetails libcamera::Converter::formats
+ */
+std::vector<PixelFormat> V4L2M2MConverter::formats(PixelFormat input)
{
if (!m2m_)
return {};
@@ -210,16 +231,22 @@ std::vector<PixelFormat> SimpleConverter::formats(PixelFormat input)
* enumerate the conversion capabilities on its output (V4L2 capture).
*/
V4L2DeviceFormat v4l2Format;
- v4l2Format.fourcc = V4L2PixelFormat::fromPixelFormat(input);
+ v4l2Format.fourcc = m2m_->output()->toV4L2PixelFormat(input);
v4l2Format.size = { 1, 1 };
int ret = m2m_->output()->setFormat(&v4l2Format);
if (ret < 0) {
- LOG(SimplePipeline, Error)
+ LOG(Converter, Error)
<< "Failed to set format: " << strerror(-ret);
return {};
}
+ if (v4l2Format.fourcc != m2m_->output()->toV4L2PixelFormat(input)) {
+ LOG(Converter, Debug)
+ << "Input format " << input << " not supported.";
+ return {};
+ }
+
std::vector<PixelFormat> pixelFormats;
for (const auto &format : m2m_->capture()->formats()) {
@@ -231,7 +258,10 @@ std::vector<PixelFormat> SimpleConverter::formats(PixelFormat input)
return pixelFormats;
}
-SizeRange SimpleConverter::sizes(const Size &input)
+/**
+ * \copydoc libcamera::Converter::sizes
+ */
+SizeRange V4L2M2MConverter::sizes(const Size &input)
{
if (!m2m_)
return {};
@@ -246,7 +276,7 @@ SizeRange SimpleConverter::sizes(const Size &input)
int ret = m2m_->output()->setFormat(&format);
if (ret < 0) {
- LOG(SimplePipeline, Error)
+ LOG(Converter, Error)
<< "Failed to set format: " << strerror(-ret);
return {};
}
@@ -256,7 +286,7 @@ SizeRange SimpleConverter::sizes(const Size &input)
format.size = { 1, 1 };
ret = m2m_->capture()->setFormat(&format);
if (ret < 0) {
- LOG(SimplePipeline, Error)
+ LOG(Converter, Error)
<< "Failed to set format: " << strerror(-ret);
return {};
}
@@ -266,7 +296,7 @@ SizeRange SimpleConverter::sizes(const Size &input)
format.size = { UINT_MAX, UINT_MAX };
ret = m2m_->capture()->setFormat(&format);
if (ret < 0) {
- LOG(SimplePipeline, Error)
+ LOG(Converter, Error)
<< "Failed to set format: " << strerror(-ret);
return {};
}
@@ -276,12 +306,15 @@ SizeRange SimpleConverter::sizes(const Size &input)
return sizes;
}
+/**
+ * \copydoc libcamera::Converter::strideAndFrameSize
+ */
std::tuple<unsigned int, unsigned int>
-SimpleConverter::strideAndFrameSize(const PixelFormat &pixelFormat,
- const Size &size)
+V4L2M2MConverter::strideAndFrameSize(const PixelFormat &pixelFormat,
+ const Size &size)
{
V4L2DeviceFormat format;
- format.fourcc = V4L2PixelFormat::fromPixelFormat(pixelFormat);
+ format.fourcc = m2m_->capture()->toV4L2PixelFormat(pixelFormat);
format.size = size;
int ret = m2m_->capture()->tryFormat(&format);
@@ -291,8 +324,11 @@ SimpleConverter::strideAndFrameSize(const PixelFormat &pixelFormat,
return std::make_tuple(format.planes[0].bpl, format.planes[0].size);
}
-int SimpleConverter::configure(const StreamConfiguration &inputCfg,
- const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs)
+/**
+ * \copydoc libcamera::Converter::configure
+ */
+int V4L2M2MConverter::configure(const StreamConfiguration &inputCfg,
+ const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs)
{
int ret = 0;
@@ -303,7 +339,7 @@ int SimpleConverter::configure(const StreamConfiguration &inputCfg,
Stream &stream = streams_.emplace_back(this, i);
if (!stream.isValid()) {
- LOG(SimplePipeline, Error)
+ LOG(Converter, Error)
<< "Failed to create stream " << i;
ret = -EINVAL;
break;
@@ -322,8 +358,11 @@ int SimpleConverter::configure(const StreamConfiguration &inputCfg,
return 0;
}
-int SimpleConverter::exportBuffers(unsigned int output, unsigned int count,
- std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+/**
+ * \copydoc libcamera::Converter::exportBuffers
+ */
+int V4L2M2MConverter::exportBuffers(unsigned int output, unsigned int count,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
{
if (output >= streams_.size())
return -EINVAL;
@@ -331,7 +370,10 @@ int SimpleConverter::exportBuffers(unsigned int output, unsigned int count,
return streams_[output].exportBuffers(count, buffers);
}
-int SimpleConverter::start()
+/**
+ * \copydoc libcamera::Converter::start
+ */
+int V4L2M2MConverter::start()
{
int ret;
@@ -346,14 +388,20 @@ int SimpleConverter::start()
return 0;
}
-void SimpleConverter::stop()
+/**
+ * \copydoc libcamera::Converter::stop
+ */
+void V4L2M2MConverter::stop()
{
for (Stream &stream : utils::reverse(streams_))
stream.stop();
}
-int SimpleConverter::queueBuffers(FrameBuffer *input,
- const std::map<unsigned int, FrameBuffer *> &outputs)
+/**
+ * \copydoc libcamera::Converter::queueBuffers
+ */
+int V4L2M2MConverter::queueBuffers(FrameBuffer *input,
+ const std::map<unsigned int, FrameBuffer *> &outputs)
{
unsigned int mask = 0;
int ret;
@@ -396,4 +444,11 @@ int SimpleConverter::queueBuffers(FrameBuffer *input,
return 0;
}
+static std::initializer_list<std::string> compatibles = {
+ "mtk-mdp",
+ "pxp",
+};
+
+REGISTER_CONVERTER("v4l2_m2m", V4L2M2MConverter, compatibles)
+
} /* namespace libcamera */
diff --git a/src/libcamera/converter/meson.build b/src/libcamera/converter/meson.build
new file mode 100644
index 00000000..2aa72fe4
--- /dev/null
+++ b/src/libcamera/converter/meson.build
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_sources += files([
+ 'converter_v4l2_m2m.cpp'
+])
diff --git a/src/libcamera/delayed_controls.cpp b/src/libcamera/delayed_controls.cpp
index 9667187e..94d0a575 100644
--- a/src/libcamera/delayed_controls.cpp
+++ b/src/libcamera/delayed_controls.cpp
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Ltd.
+ * Copyright (C) 2020, Raspberry Pi Ltd
*
- * delayed_controls.h - Helper to deal with controls that take effect with a delay
+ * Helper to deal with controls that take effect with a delay
*/
#include "libcamera/internal/delayed_controls.h"
@@ -115,8 +115,6 @@ DelayedControls::DelayedControls(V4L2Device *device,
*/
void DelayedControls::reset()
{
- running_ = false;
- firstSequence_ = 0;
queueCount_ = 1;
writeCount_ = 0;
@@ -204,8 +202,7 @@ bool DelayedControls::push(const ControlList &controls)
*/
ControlList DelayedControls::get(uint32_t sequence)
{
- uint32_t adjustedSeq = sequence - firstSequence_;
- unsigned int index = std::max<int>(0, adjustedSeq - maxDelay_);
+ unsigned int index = std::max<int>(0, sequence - maxDelay_);
ControlList out(device_->controls());
for (const auto &ctrl : values_) {
@@ -236,11 +233,6 @@ void DelayedControls::applyControls(uint32_t sequence)
{
LOG(DelayedControls, Debug) << "frame " << sequence << " started";
- if (!running_) {
- firstSequence_ = sequence;
- running_ = true;
- }
-
/*
* Create control list peeking ahead in the value queue to ensure
* values are set in time to satisfy the sensor delay.
@@ -279,7 +271,7 @@ void DelayedControls::applyControls(uint32_t sequence)
}
}
- writeCount_ = sequence - firstSequence_ + 1;
+ writeCount_ = sequence + 1;
while (writeCount_ > queueCount_) {
LOG(DelayedControls, Debug)
diff --git a/src/libcamera/device_enumerator.cpp b/src/libcamera/device_enumerator.cpp
index d1258050..ae17862f 100644
--- a/src/libcamera/device_enumerator.cpp
+++ b/src/libcamera/device_enumerator.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * device_enumerator.cpp - Enumeration and matching
+ * Enumeration and matching
*/
#include "libcamera/internal/device_enumerator.h"
@@ -56,7 +56,7 @@ LOG_DEFINE_CATEGORY(DeviceEnumerator)
* names can be added as match criteria.
*
* Pipeline handlers are recommended to add entities to DeviceMatch as
- * appropriare to ensure that the media device they need can be uniquely
+ * appropriate to ensure that the media device they need can be uniquely
* identified. This is useful when the corresponding kernel driver can produce
* different graphs, for instance as a result of different driver versions or
* hardware configurations, and not all those graphs are suitable for a pipeline
@@ -101,8 +101,14 @@ bool DeviceMatch::match(const MediaDevice *device) const
for (const MediaEntity *entity : device->entities()) {
if (name == entity->name()) {
- found = true;
- break;
+ if (!entity->deviceNode().empty()) {
+ found = true;
+ break;
+ } else {
+ LOG(DeviceEnumerator, Debug)
+ << "Skip " << entity->name()
+ << ": no device node";
+ }
}
}
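
A minimal sketch of a DeviceMatch that now only succeeds when the named entity
exposes a device node; the driver and entity names are illustrative:

#include <memory>

#include "libcamera/internal/device_enumerator.h"
#include "libcamera/internal/media_device.h"

using namespace libcamera;

std::shared_ptr<MediaDevice> findCsiDevice(DeviceEnumerator *enumerator)
{
	DeviceMatch dm("imx7-csi");
	dm.add("csi"); /* the entity must have a video device node to match */

	return enumerator->search(dm);
}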
@@ -161,7 +167,7 @@ std::unique_ptr<DeviceEnumerator> DeviceEnumerator::create()
DeviceEnumerator::~DeviceEnumerator()
{
- for (std::shared_ptr<MediaDevice> media : devices_) {
+ for (const std::shared_ptr<MediaDevice> &media : devices_) {
if (media->busy())
LOG(DeviceEnumerator, Error)
<< "Removing media device " << media->deviceNode()
diff --git a/src/libcamera/device_enumerator_sysfs.cpp b/src/libcamera/device_enumerator_sysfs.cpp
index 686bb809..fc33ba52 100644
--- a/src/libcamera/device_enumerator_sysfs.cpp
+++ b/src/libcamera/device_enumerator_sysfs.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * device_enumerator_sysfs.cpp - sysfs-based device enumerator
+ * sysfs-based device enumerator
*/
#include "libcamera/internal/device_enumerator_sysfs.h"
diff --git a/src/libcamera/device_enumerator_udev.cpp b/src/libcamera/device_enumerator_udev.cpp
index 5317afbd..01c70b6d 100644
--- a/src/libcamera/device_enumerator_udev.cpp
+++ b/src/libcamera/device_enumerator_udev.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018-2019, Google Inc.
*
- * device_enumerator_udev.cpp - udev-based device enumerator
+ * udev-based device enumerator
*/
#include "libcamera/internal/device_enumerator_udev.h"
@@ -13,6 +13,7 @@
#include <list>
#include <map>
#include <string.h>
+#include <string_view>
#include <sys/ioctl.h>
#include <sys/sysmacros.h>
#include <unistd.h>
@@ -315,6 +316,7 @@ int DeviceEnumeratorUdev::addV4L2Device(dev_t devnum)
* enumerator.
*/
deps->deps_.erase(devnum);
+ devMap_.erase(it);
if (deps->deps_.empty()) {
LOG(DeviceEnumerator, Debug)
@@ -330,18 +332,18 @@ int DeviceEnumeratorUdev::addV4L2Device(dev_t devnum)
void DeviceEnumeratorUdev::udevNotify()
{
struct udev_device *dev = udev_monitor_receive_device(monitor_);
- std::string action(udev_device_get_action(dev));
- std::string deviceNode(udev_device_get_devnode(dev));
+ std::string_view action(udev_device_get_action(dev));
+ std::string_view deviceNode(udev_device_get_devnode(dev));
LOG(DeviceEnumerator, Debug)
- << action << " device " << udev_device_get_devnode(dev);
+ << action << " device " << deviceNode;
if (action == "add") {
addUdevDevice(dev);
} else if (action == "remove") {
const char *subsystem = udev_device_get_subsystem(dev);
if (subsystem && !strcmp(subsystem, "media"))
- removeDevice(deviceNode);
+ removeDevice(std::string(deviceNode));
}
udev_device_unref(dev);
diff --git a/src/libcamera/dma_heaps.cpp b/src/libcamera/dma_heaps.cpp
new file mode 100644
index 00000000..d4cb880b
--- /dev/null
+++ b/src/libcamera/dma_heaps.cpp
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * Helper class for dma-heap allocations.
+ */
+
+#include "libcamera/internal/dma_heaps.h"
+
+#include <array>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+
+#include <linux/dma-buf.h>
+#include <linux/dma-heap.h>
+
+#include <libcamera/base/log.h>
+
+/**
+ * \file dma_heaps.cpp
+ * \brief dma-heap allocator
+ */
+
+namespace libcamera {
+
+/*
+ * /dev/dma_heap/linux,cma is the dma-heap allocator, which allows dmaheap-cma
+ * to only have to worry about importing.
+ *
+ * Annoyingly, should the cma heap size be specified on the kernel command line
+ * instead of DT, the heap gets named "reserved" instead.
+ */
+
+#ifndef __DOXYGEN__
+struct DmaHeapInfo {
+ DmaHeap::DmaHeapFlag type;
+ const char *deviceNodeName;
+};
+#endif
+
+static constexpr std::array<DmaHeapInfo, 3> heapInfos = { {
+ { DmaHeap::DmaHeapFlag::Cma, "/dev/dma_heap/linux,cma" },
+ { DmaHeap::DmaHeapFlag::Cma, "/dev/dma_heap/reserved" },
+ { DmaHeap::DmaHeapFlag::System, "/dev/dma_heap/system" },
+} };
+
+LOG_DEFINE_CATEGORY(DmaHeap)
+
+/**
+ * \class DmaHeap
+ * \brief Helper class for dma-heap allocations
+ *
+ * DMA heaps are kernel devices that provide an API to allocate memory from
+ * different pools called "heaps", wrap each allocated piece of memory in a
+ * dmabuf object, and return the dmabuf file descriptor to userspace. Multiple
+ * heaps can be provided by the system, with different properties for the
+ * underlying memory.
+ *
+ * This class wraps a DMA heap selected at construction time, and exposes
+ * functions to manage memory allocation.
+ */
+
+/**
+ * \enum DmaHeap::DmaHeapFlag
+ * \brief Type of the dma-heap
+ * \var DmaHeap::Cma
+ * \brief Allocate from a CMA dma-heap, providing physically-contiguous memory
+ * \var DmaHeap::System
+ * \brief Allocate from the system dma-heap, using the page allocator
+ */
+
+/**
+ * \typedef DmaHeap::DmaHeapFlags
+ * \brief A bitwise combination of DmaHeap::DmaHeapFlag values
+ */
+
+/**
+ * \brief Construct a DmaHeap of a given type
+ * \param[in] type The type(s) of the dma-heap(s) to allocate from
+ *
+ * The DMA heap type is selected with the \a type parameter, which defaults to
+ * the CMA heap. If no heap of the given type can be accessed, the constructed
+ * DmaHeap instance is invalid as indicated by the isValid() function.
+ *
+ * Multiple types can be selected by combining type flags, in which case the
+ * constructed DmaHeap will match one of the types. If the system provides
+ * multiple heaps that match the requested types, which heap is used is
+ * undefined.
+ */
+DmaHeap::DmaHeap(DmaHeapFlags type)
+{
+ for (const auto &info : heapInfos) {
+ if (!(type & info.type))
+ continue;
+
+ int ret = ::open(info.deviceNodeName, O_RDWR | O_CLOEXEC, 0);
+ if (ret < 0) {
+ ret = errno;
+ LOG(DmaHeap, Debug)
+ << "Failed to open " << info.deviceNodeName << ": "
+ << strerror(ret);
+ continue;
+ }
+
+ LOG(DmaHeap, Debug) << "Using " << info.deviceNodeName;
+ dmaHeapHandle_ = UniqueFD(ret);
+ break;
+ }
+
+ if (!dmaHeapHandle_.isValid())
+ LOG(DmaHeap, Error) << "Could not open any dmaHeap device";
+}
+
+/**
+ * \brief Destroy the DmaHeap instance
+ */
+DmaHeap::~DmaHeap() = default;
+
+/**
+ * \fn DmaHeap::isValid()
+ * \brief Check if the DmaHeap instance is valid
+ * \return True if the DmaHeap is valid, false otherwise
+ */
+
+/**
+ * \brief Allocate a dma-buf from the DmaHeap
+ * \param[in] name The name to set for the allocated buffer
+ * \param[in] size The size of the buffer to allocate
+ *
+ * Allocates a dma-buf with read/write access.
+ *
+ * If the allocation fails, an invalid UniqueFD is returned.
+ *
+ * \return The UniqueFD of the allocated buffer
+ */
+UniqueFD DmaHeap::alloc(const char *name, std::size_t size)
+{
+ int ret;
+
+ if (!name)
+ return {};
+
+ struct dma_heap_allocation_data alloc = {};
+
+ alloc.len = size;
+ alloc.fd_flags = O_CLOEXEC | O_RDWR;
+
+ ret = ::ioctl(dmaHeapHandle_.get(), DMA_HEAP_IOCTL_ALLOC, &alloc);
+ if (ret < 0) {
+ LOG(DmaHeap, Error) << "dmaHeap allocation failure for " << name;
+ return {};
+ }
+
+ UniqueFD allocFd(alloc.fd);
+ ret = ::ioctl(allocFd.get(), DMA_BUF_SET_NAME, name);
+ if (ret < 0) {
+ LOG(DmaHeap, Error) << "dmaHeap naming failure for " << name;
+ return {};
+ }
+
+ return allocFd;
+}
+
+} /* namespace libcamera */
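
A short sketch of allocating a frame buffer from the CMA heap through this
helper; the buffer name and size are illustrative:

#include <libcamera/base/unique_fd.h>

#include "libcamera/internal/dma_heaps.h"

using namespace libcamera;

UniqueFD allocateNV12Frame()
{
	DmaHeap heap(DmaHeap::DmaHeapFlag::Cma);
	if (!heap.isValid())
		return {};

	/* 1920x1080 NV12 frame, 12 bits per pixel on average. */
	return heap.alloc("frame-0", 1920 * 1080 * 3 / 2);
}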
diff --git a/src/libcamera/fence.cpp b/src/libcamera/fence.cpp
index 7b784778..634c74f8 100644
--- a/src/libcamera/fence.cpp
+++ b/src/libcamera/fence.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * fence.cpp - Synchronization fence
+ * Synchronization fence
*/
#include "libcamera/fence.h"
diff --git a/src/libcamera/formats.cpp b/src/libcamera/formats.cpp
index 283ecb3d..cf41f2c2 100644
--- a/src/libcamera/formats.cpp
+++ b/src/libcamera/formats.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * formats.cpp - libcamera image formats
+ * libcamera image formats
*/
#include "libcamera/internal/formats.h"
@@ -33,7 +33,7 @@ LOG_DEFINE_CATEGORY(Formats)
* used in pipeline handlers.
*
* \var PixelFormatInfo::name
- * \brief The format name as a human-readable string, used as the test
+ * \brief The format name as a human-readable string, used as the text
* representation of the PixelFormat
*
* \var PixelFormatInfo::format
@@ -42,19 +42,16 @@ LOG_DEFINE_CATEGORY(Formats)
* \var PixelFormatInfo::v4l2Formats
* \brief The V4L2 pixel formats corresponding to the PixelFormat
*
- * Multiple V4L2 formats may exist for one PixelFormat when the format uses
- * multiple planes, as V4L2 defines separate 4CCs for contiguous and separate
- * planes formats. The two entries in the array store the contiguous and
- * non-contiguous V4L2 formats respectively. If the PixelFormat isn't a
- * multiplanar format, or if no corresponding non-contiguous V4L2 format
- * exists, the second entry is invalid.
+ * Multiple V4L2 formats may exist for one PixelFormat, as V4L2 defines
+ * separate 4CCs for contiguous and non-contiguous versions of the same image
+ * format.
*
* \var PixelFormatInfo::bitsPerPixel
* \brief The average number of bits per pixel
*
- * The number per pixel averages the total number of bits for all colour
- * components over the whole image, excluding any padding bits or padding
- * pixels.
+ * The number of bits per pixel averages the total number of bits for all
+ * colour components over the whole image, excluding any padding bits or
+ * padding pixels.
*
* For formats that store pixels with bit padding within words, only the
* effective bits are taken into account. For instance, 12-bit Bayer data
@@ -156,10 +153,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::RGB565, {
.name = "RGB565",
.format = formats::RGB565,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_RGB565),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_RGB565), },
.bitsPerPixel = 16,
.colourEncoding = PixelFormatInfo::ColourEncodingRGB,
.packed = false,
@@ -169,10 +163,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::RGB565_BE, {
.name = "RGB565_BE",
.format = formats::RGB565_BE,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_RGB565X),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_RGB565X), },
.bitsPerPixel = 16,
.colourEncoding = PixelFormatInfo::ColourEncodingRGB,
.packed = false,
@@ -182,10 +173,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::BGR888, {
.name = "BGR888",
.format = formats::BGR888,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_RGB24),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_RGB24), },
.bitsPerPixel = 24,
.colourEncoding = PixelFormatInfo::ColourEncodingRGB,
.packed = false,
@@ -195,10 +183,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::RGB888, {
.name = "RGB888",
.format = formats::RGB888,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_BGR24),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_BGR24), },
.bitsPerPixel = 24,
.colourEncoding = PixelFormatInfo::ColourEncodingRGB,
.packed = false,
@@ -208,10 +193,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::XRGB8888, {
.name = "XRGB8888",
.format = formats::XRGB8888,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_XBGR32),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_XBGR32), },
.bitsPerPixel = 32,
.colourEncoding = PixelFormatInfo::ColourEncodingRGB,
.packed = false,
@@ -221,10 +203,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::XBGR8888, {
.name = "XBGR8888",
.format = formats::XBGR8888,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_RGBX32),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_RGBX32), },
.bitsPerPixel = 32,
.colourEncoding = PixelFormatInfo::ColourEncodingRGB,
.packed = false,
@@ -234,10 +213,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::RGBX8888, {
.name = "RGBX8888",
.format = formats::RGBX8888,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_BGRX32),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_BGRX32), },
.bitsPerPixel = 32,
.colourEncoding = PixelFormatInfo::ColourEncodingRGB,
.packed = false,
@@ -247,10 +223,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::BGRX8888, {
.name = "BGRX8888",
.format = formats::BGRX8888,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_XRGB32),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_XRGB32), },
.bitsPerPixel = 32,
.colourEncoding = PixelFormatInfo::ColourEncodingRGB,
.packed = false,
@@ -260,10 +233,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::ABGR8888, {
.name = "ABGR8888",
.format = formats::ABGR8888,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_RGBA32),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_RGBA32), },
.bitsPerPixel = 32,
.colourEncoding = PixelFormatInfo::ColourEncodingRGB,
.packed = false,
@@ -273,10 +243,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::ARGB8888, {
.name = "ARGB8888",
.format = formats::ARGB8888,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_ABGR32),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_ABGR32), },
.bitsPerPixel = 32,
.colourEncoding = PixelFormatInfo::ColourEncodingRGB,
.packed = false,
@@ -286,10 +253,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::BGRA8888, {
.name = "BGRA8888",
.format = formats::BGRA8888,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_ARGB32),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_ARGB32), },
.bitsPerPixel = 32,
.colourEncoding = PixelFormatInfo::ColourEncodingRGB,
.packed = false,
@@ -299,25 +263,39 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::RGBA8888, {
.name = "RGBA8888",
.format = formats::RGBA8888,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_BGRA32),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_BGRA32), },
.bitsPerPixel = 32,
.colourEncoding = PixelFormatInfo::ColourEncodingRGB,
.packed = false,
.pixelsPerGroup = 1,
.planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
} },
+ { formats::BGR161616, {
+ .name = "BGR161616",
+ .format = formats::BGR161616,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_RGB48), },
+ .bitsPerPixel = 48,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 3, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::RGB161616, {
+ .name = "RGB161616",
+ .format = formats::RGB161616,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_BGR48), },
+ .bitsPerPixel = 48,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 3, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
/* YUV packed formats. */
{ formats::YUYV, {
.name = "YUYV",
.format = formats::YUYV,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_YUYV),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_YUYV), },
.bitsPerPixel = 16,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
.packed = false,
@@ -327,10 +305,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::YVYU, {
.name = "YVYU",
.format = formats::YVYU,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_YVYU),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_YVYU), },
.bitsPerPixel = 16,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
.packed = false,
@@ -340,10 +315,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::UYVY, {
.name = "UYVY",
.format = formats::UYVY,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_UYVY),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_UYVY), },
.bitsPerPixel = 16,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
.packed = false,
@@ -353,24 +325,41 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::VYUY, {
.name = "VYUY",
.format = formats::VYUY,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_VYUY),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_VYUY), },
.bitsPerPixel = 16,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
.packed = false,
.pixelsPerGroup = 2,
.planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
} },
+ { formats::AVUY8888, {
+ .name = "AVUY8888",
+ .format = formats::AVUY8888,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_YUVA32), },
+ .bitsPerPixel = 32,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::XVUY8888, {
+ .name = "XVUY8888",
+ .format = formats::XVUY8888,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_YUVX32), },
+ .bitsPerPixel = 32,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
/* YUV planar formats. */
{ formats::NV12, {
.name = "NV12",
.format = formats::NV12,
.v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_NV12),
- .multi = V4L2PixelFormat(V4L2_PIX_FMT_NV12M),
+ V4L2PixelFormat(V4L2_PIX_FMT_NV12),
+ V4L2PixelFormat(V4L2_PIX_FMT_NV12M),
},
.bitsPerPixel = 12,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
@@ -382,8 +371,8 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
.name = "NV21",
.format = formats::NV21,
.v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_NV21),
- .multi = V4L2PixelFormat(V4L2_PIX_FMT_NV21M),
+ V4L2PixelFormat(V4L2_PIX_FMT_NV21),
+ V4L2PixelFormat(V4L2_PIX_FMT_NV21M),
},
.bitsPerPixel = 12,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
@@ -395,8 +384,8 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
.name = "NV16",
.format = formats::NV16,
.v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_NV16),
- .multi = V4L2PixelFormat(V4L2_PIX_FMT_NV16M),
+ V4L2PixelFormat(V4L2_PIX_FMT_NV16),
+ V4L2PixelFormat(V4L2_PIX_FMT_NV16M),
},
.bitsPerPixel = 16,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
@@ -408,8 +397,8 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
.name = "NV61",
.format = formats::NV61,
.v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_NV61),
- .multi = V4L2PixelFormat(V4L2_PIX_FMT_NV61M),
+ V4L2PixelFormat(V4L2_PIX_FMT_NV61),
+ V4L2PixelFormat(V4L2_PIX_FMT_NV61M),
},
.bitsPerPixel = 16,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
@@ -420,10 +409,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::NV24, {
.name = "NV24",
.format = formats::NV24,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_NV24),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_NV24), },
.bitsPerPixel = 24,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
.packed = false,
@@ -433,10 +419,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::NV42, {
.name = "NV42",
.format = formats::NV42,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_NV42),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_NV42), },
.bitsPerPixel = 24,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
.packed = false,
@@ -447,8 +430,8 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
.name = "YUV420",
.format = formats::YUV420,
.v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_YUV420),
- .multi = V4L2PixelFormat(V4L2_PIX_FMT_YUV420M),
+ V4L2PixelFormat(V4L2_PIX_FMT_YUV420),
+ V4L2PixelFormat(V4L2_PIX_FMT_YUV420M),
},
.bitsPerPixel = 12,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
@@ -460,8 +443,8 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
.name = "YVU420",
.format = formats::YVU420,
.v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_YVU420),
- .multi = V4L2PixelFormat(V4L2_PIX_FMT_YVU420M),
+ V4L2PixelFormat(V4L2_PIX_FMT_YVU420),
+ V4L2PixelFormat(V4L2_PIX_FMT_YVU420M),
},
.bitsPerPixel = 12,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
@@ -473,8 +456,8 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
.name = "YUV422",
.format = formats::YUV422,
.v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_YUV422P),
- .multi = V4L2PixelFormat(V4L2_PIX_FMT_YUV422M),
+ V4L2PixelFormat(V4L2_PIX_FMT_YUV422P),
+ V4L2PixelFormat(V4L2_PIX_FMT_YUV422M),
},
.bitsPerPixel = 16,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
@@ -485,10 +468,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::YVU422, {
.name = "YVU422",
.format = formats::YVU422,
- .v4l2Formats = {
- .single = V4L2PixelFormat(),
- .multi = V4L2PixelFormat(V4L2_PIX_FMT_YVU422M),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_YVU422M), },
.bitsPerPixel = 16,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
.packed = false,
@@ -498,10 +478,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::YUV444, {
.name = "YUV444",
.format = formats::YUV444,
- .v4l2Formats = {
- .single = V4L2PixelFormat(),
- .multi = V4L2PixelFormat(V4L2_PIX_FMT_YUV444M),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_YUV444M), },
.bitsPerPixel = 24,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
.packed = false,
@@ -511,10 +488,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::YVU444, {
.name = "YVU444",
.format = formats::YVU444,
- .v4l2Formats = {
- .single = V4L2PixelFormat(),
- .multi = V4L2PixelFormat(V4L2_PIX_FMT_YVU444M),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_YVU444M), },
.bitsPerPixel = 24,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
.packed = false,
@@ -526,10 +500,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::R8, {
.name = "R8",
.format = formats::R8,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_GREY),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_GREY), },
.bitsPerPixel = 8,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
.packed = false,
@@ -539,51 +510,59 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::R10, {
.name = "R10",
.format = formats::R10,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_Y10),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_Y10), },
.bitsPerPixel = 10,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
.packed = false,
.pixelsPerGroup = 1,
.planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
} },
+ { formats::R10_CSI2P, {
+ .name = "R10_CSI2P",
+ .format = formats::R10_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_Y10P), },
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = true,
+ .pixelsPerGroup = 4,
+ .planes = {{ { 5, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
{ formats::R12, {
.name = "R12",
.format = formats::R12,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_Y12),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_Y12), },
.bitsPerPixel = 12,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
.packed = false,
.pixelsPerGroup = 1,
.planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
} },
- { formats::R10_CSI2P, {
- .name = "R10_CSI2P",
- .format = formats::R10,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_Y10P),
- .multi = V4L2PixelFormat(),
- },
- .bitsPerPixel = 10,
+ { formats::R16, {
+ .name = "R16",
+ .format = formats::R16,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_Y16), },
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::MONO_PISP_COMP1, {
+ .name = "MONO_PISP_COMP1",
+ .format = formats::MONO_PISP_COMP1,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_MONO), },
+ .bitsPerPixel = 8,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
.packed = true,
- .pixelsPerGroup = 4,
- .planes = {{ { 5, 1 }, { 0, 0 }, { 0, 0 } }},
+ .pixelsPerGroup = 1,
+ .planes = {{ { 1, 1 }, { 0, 0 }, { 0, 0 } }},
} },
/* Bayer formats. */
{ formats::SBGGR8, {
.name = "SBGGR8",
.format = formats::SBGGR8,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SBGGR8),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR8), },
.bitsPerPixel = 8,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -593,10 +572,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SGBRG8, {
.name = "SGBRG8",
.format = formats::SGBRG8,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SGBRG8),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG8), },
.bitsPerPixel = 8,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -606,10 +582,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SGRBG8, {
.name = "SGRBG8",
.format = formats::SGRBG8,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SGRBG8),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG8), },
.bitsPerPixel = 8,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -619,10 +592,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SRGGB8, {
.name = "SRGGB8",
.format = formats::SRGGB8,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SRGGB8),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB8), },
.bitsPerPixel = 8,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -632,10 +602,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SBGGR10, {
.name = "SBGGR10",
.format = formats::SBGGR10,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SBGGR10),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR10), },
.bitsPerPixel = 10,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -645,10 +612,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SGBRG10, {
.name = "SGBRG10",
.format = formats::SGBRG10,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SGBRG10),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG10), },
.bitsPerPixel = 10,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -658,10 +622,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SGRBG10, {
.name = "SGRBG10",
.format = formats::SGRBG10,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SGRBG10),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG10), },
.bitsPerPixel = 10,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -671,10 +632,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SRGGB10, {
.name = "SRGGB10",
.format = formats::SRGGB10,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SRGGB10),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB10), },
.bitsPerPixel = 10,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -684,10 +642,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SBGGR10_CSI2P, {
.name = "SBGGR10_CSI2P",
.format = formats::SBGGR10_CSI2P,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SBGGR10P),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR10P), },
.bitsPerPixel = 10,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = true,
@@ -697,10 +652,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SGBRG10_CSI2P, {
.name = "SGBRG10_CSI2P",
.format = formats::SGBRG10_CSI2P,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SGBRG10P),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG10P), },
.bitsPerPixel = 10,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = true,
@@ -710,10 +662,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SGRBG10_CSI2P, {
.name = "SGRBG10_CSI2P",
.format = formats::SGRBG10_CSI2P,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SGRBG10P),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG10P), },
.bitsPerPixel = 10,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = true,
@@ -723,10 +672,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SRGGB10_CSI2P, {
.name = "SRGGB10_CSI2P",
.format = formats::SRGGB10_CSI2P,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SRGGB10P),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB10P), },
.bitsPerPixel = 10,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = true,
@@ -736,10 +682,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SBGGR12, {
.name = "SBGGR12",
.format = formats::SBGGR12,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SBGGR12),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR12), },
.bitsPerPixel = 12,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -749,10 +692,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SGBRG12, {
.name = "SGBRG12",
.format = formats::SGBRG12,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SGBRG12),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG12), },
.bitsPerPixel = 12,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -762,10 +702,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SGRBG12, {
.name = "SGRBG12",
.format = formats::SGRBG12,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SGRBG12),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG12), },
.bitsPerPixel = 12,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -775,10 +712,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SRGGB12, {
.name = "SRGGB12",
.format = formats::SRGGB12,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SRGGB12),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB12), },
.bitsPerPixel = 12,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -788,10 +722,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SBGGR12_CSI2P, {
.name = "SBGGR12_CSI2P",
.format = formats::SBGGR12_CSI2P,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SBGGR12P),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR12P), },
.bitsPerPixel = 12,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = true,
@@ -801,10 +732,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SGBRG12_CSI2P, {
.name = "SGBRG12_CSI2P",
.format = formats::SGBRG12_CSI2P,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SGBRG12P),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG12P), },
.bitsPerPixel = 12,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = true,
@@ -814,10 +742,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SGRBG12_CSI2P, {
.name = "SGRBG12_CSI2P",
.format = formats::SGRBG12_CSI2P,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SGRBG12P),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG12P), },
.bitsPerPixel = 12,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = true,
@@ -827,23 +752,97 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SRGGB12_CSI2P, {
.name = "SRGGB12_CSI2P",
.format = formats::SRGGB12_CSI2P,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SRGGB12P),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB12P), },
.bitsPerPixel = 12,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = true,
.pixelsPerGroup = 2,
.planes = {{ { 3, 1 }, { 0, 0 }, { 0, 0 } }},
} },
+ { formats::SBGGR14, {
+ .name = "SBGGR14",
+ .format = formats::SBGGR14,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR14), },
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGBRG14, {
+ .name = "SGBRG14",
+ .format = formats::SGBRG14,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG14), },
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGRBG14, {
+ .name = "SGRBG14",
+ .format = formats::SGRBG14,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG14), },
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SRGGB14, {
+ .name = "SRGGB14",
+ .format = formats::SRGGB14,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB14), },
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SBGGR14_CSI2P, {
+ .name = "SBGGR14_CSI2P",
+ .format = formats::SBGGR14_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR14P), },
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 4,
+ .planes = {{ { 7, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGBRG14_CSI2P, {
+ .name = "SGBRG14_CSI2P",
+ .format = formats::SGBRG14_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG14P), },
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 4,
+ .planes = {{ { 7, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGRBG14_CSI2P, {
+ .name = "SGRBG14_CSI2P",
+ .format = formats::SGRBG14_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG14P), },
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 4,
+ .planes = {{ { 7, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SRGGB14_CSI2P, {
+ .name = "SRGGB14_CSI2P",
+ .format = formats::SRGGB14_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB14P), },
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 4,
+ .planes = {{ { 7, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
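The numbers in the new 14-bit entries follow directly from the bit depth: the unpacked variants pad each sample to 16 bits, so a group of 2 pixels occupies 4 bytes (pixelsPerGroup = 2, bytesPerGroup = 4), while the packed CSI-2 variants store 4 pixels in 7 bytes (4 x 14 bits = 56 bits), hence pixelsPerGroup = 4 and bytesPerGroup = 7.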
{ formats::SBGGR16, {
.name = "SBGGR16",
.format = formats::SBGGR16,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SBGGR16),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR16), },
.bitsPerPixel = 16,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -853,10 +852,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SGBRG16, {
.name = "SGBRG16",
.format = formats::SGBRG16,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SGBRG16),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG16), },
.bitsPerPixel = 16,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -866,10 +862,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SGRBG16, {
.name = "SGRBG16",
.format = formats::SGRBG16,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SGRBG16),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG16), },
.bitsPerPixel = 16,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -879,10 +872,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SRGGB16, {
.name = "SRGGB16",
.format = formats::SRGGB16,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SRGGB16),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB16), },
.bitsPerPixel = 16,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -892,10 +882,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SBGGR10_IPU3, {
.name = "SBGGR10_IPU3",
.format = formats::SBGGR10_IPU3,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SBGGR10),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SBGGR10), },
.bitsPerPixel = 10,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = true,
@@ -906,10 +893,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SGBRG10_IPU3, {
.name = "SGBRG10_IPU3",
.format = formats::SGBRG10_IPU3,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SGBRG10),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SGBRG10), },
.bitsPerPixel = 10,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = true,
@@ -919,10 +903,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SGRBG10_IPU3, {
.name = "SGRBG10_IPU3",
.format = formats::SGRBG10_IPU3,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SGRBG10),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SGRBG10), },
.bitsPerPixel = 10,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = true,
@@ -932,24 +913,60 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SRGGB10_IPU3, {
.name = "SRGGB10_IPU3",
.format = formats::SRGGB10_IPU3,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SRGGB10),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SRGGB10), },
.bitsPerPixel = 10,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = true,
.pixelsPerGroup = 25,
.planes = {{ { 32, 1 }, { 0, 0 }, { 0, 0 } }},
} },
-
+ { formats::BGGR_PISP_COMP1, {
+ .name = "BGGR_PISP_COMP1",
+ .format = formats::BGGR_PISP_COMP1,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_BGGR), },
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::GBRG_PISP_COMP1, {
+ .name = "GBRG_PISP_COMP1",
+ .format = formats::GBRG_PISP_COMP1,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_GBRG), },
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::GRBG_PISP_COMP1, {
+ .name = "GRBG_PISP_COMP1",
+ .format = formats::GRBG_PISP_COMP1,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_GRBG), },
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::RGGB_PISP_COMP1, {
+ .name = "RGGB_PISP_COMP1",
+ .format = formats::RGGB_PISP_COMP1,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_RGGB), },
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
/* Compressed formats. */
{ formats::MJPEG, {
.name = "MJPEG",
.format = formats::MJPEG,
.v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_MJPEG),
- .multi = V4L2PixelFormat(),
+ V4L2PixelFormat(V4L2_PIX_FMT_MJPEG),
+ V4L2PixelFormat(V4L2_PIX_FMT_JPEG),
},
.bitsPerPixel = 0,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
@@ -987,22 +1004,22 @@ const PixelFormatInfo &PixelFormatInfo::info(const PixelFormat &format)
}
/**
- * \brief Retrieve information about a pixel format
+ * \brief Retrieve information about a V4L2 pixel format
* \param[in] format The V4L2 pixel format
* \return The PixelFormatInfo describing the V4L2 \a format if known, or an
* invalid PixelFormatInfo otherwise
*/
const PixelFormatInfo &PixelFormatInfo::info(const V4L2PixelFormat &format)
{
- const auto &info = std::find_if(pixelFormatInfo.begin(), pixelFormatInfo.end(),
- [format](auto pair) {
- return pair.second.v4l2Formats.single == format ||
- pair.second.v4l2Formats.multi == format;
- });
- if (info == pixelFormatInfo.end())
+ PixelFormat pixelFormat = format.toPixelFormat(false);
+ if (!pixelFormat.isValid())
return pixelFormatInfoInvalid;
- return info->second;
+ const auto iter = pixelFormatInfo.find(pixelFormat);
+ if (iter == pixelFormatInfo.end())
+ return pixelFormatInfoInvalid;
+
+ return iter->second;
}
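A usage sketch of the reworked lookup (illustrative only, not part of the patch; the header paths and the isValid() helper are assumptions based on the surrounding tree): v4l2Formats is now a plain list of V4L2 formats, and V4L2-based queries first translate the format to a PixelFormat before indexing the map.

#include "libcamera/internal/formats.h"
#include "libcamera/internal/v4l2_pixelformat.h"

using namespace libcamera;

/* Return true if a V4L2 format maps to a known, packed RAW libcamera format. */
bool isPackedRaw(const V4L2PixelFormat &v4l2Format)
{
	const PixelFormatInfo &info = PixelFormatInfo::info(v4l2Format);
	if (!info.isValid())
		return false;

	return info.colourEncoding == PixelFormatInfo::ColourEncodingRAW &&
	       info.packed;
}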
/**
@@ -1049,7 +1066,7 @@ unsigned int PixelFormatInfo::stride(unsigned int width, unsigned int plane,
return 0;
}
- if (plane > planes.size() || !planes[plane].bytesPerGroup) {
+ if (plane >= planes.size() || !planes[plane].bytesPerGroup) {
LOG(Formats, Warning) << "Invalid plane index, stride is zero";
return 0;
}
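A small illustration of the bound fix (sketch only, assuming the usual three-entry planes array): a plane index equal to the array size is now rejected instead of indexing past the array.

const PixelFormatInfo &info = PixelFormatInfo::info(formats::NV12);

/* Plane 3 does not exist; the call now warns and returns 0. */
unsigned int stride = info.stride(1920, 3);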
diff --git a/src/libcamera/formats.yaml b/src/libcamera/formats.yaml
index 7dda0132..fe027a7c 100644
--- a/src/libcamera/formats.yaml
+++ b/src/libcamera/formats.yaml
@@ -2,7 +2,7 @@
#
# Copyright (C) 2020, Google Inc.
#
-%YAML 1.2
+%YAML 1.1
---
formats:
- R8:
@@ -11,6 +11,8 @@ formats:
fourcc: DRM_FORMAT_R10
- R12:
fourcc: DRM_FORMAT_R12
+ - R16:
+ fourcc: DRM_FORMAT_R16
- RGB565:
fourcc: DRM_FORMAT_RGB565
@@ -41,6 +43,11 @@ formats:
- BGRA8888:
fourcc: DRM_FORMAT_BGRA8888
+ - RGB161616:
+ fourcc: DRM_FORMAT_RGB161616
+ - BGR161616:
+ fourcc: DRM_FORMAT_BGR161616
+
- YUYV:
fourcc: DRM_FORMAT_YUYV
- YVYU:
@@ -49,6 +56,10 @@ formats:
fourcc: DRM_FORMAT_UYVY
- VYUY:
fourcc: DRM_FORMAT_VYUY
+ - AVUY8888:
+ fourcc: DRM_FORMAT_AVUY8888
+ - XVUY8888:
+ fourcc: DRM_FORMAT_XVUY8888
- NV12:
fourcc: DRM_FORMAT_NV12
@@ -106,6 +117,15 @@ formats:
- SBGGR12:
fourcc: DRM_FORMAT_SBGGR12
+ - SRGGB14:
+ fourcc: DRM_FORMAT_SRGGB14
+ - SGRBG14:
+ fourcc: DRM_FORMAT_SGRBG14
+ - SGBRG14:
+ fourcc: DRM_FORMAT_SGBRG14
+ - SBGGR14:
+ fourcc: DRM_FORMAT_SBGGR14
+
- SRGGB16:
fourcc: DRM_FORMAT_SRGGB16
- SGRBG16:
@@ -145,6 +165,19 @@ formats:
fourcc: DRM_FORMAT_SBGGR12
mod: MIPI_FORMAT_MOD_CSI2_PACKED
+ - SRGGB14_CSI2P:
+ fourcc: DRM_FORMAT_SRGGB14
+ mod: MIPI_FORMAT_MOD_CSI2_PACKED
+ - SGRBG14_CSI2P:
+ fourcc: DRM_FORMAT_SGRBG14
+ mod: MIPI_FORMAT_MOD_CSI2_PACKED
+ - SGBRG14_CSI2P:
+ fourcc: DRM_FORMAT_SGBRG14
+ mod: MIPI_FORMAT_MOD_CSI2_PACKED
+ - SBGGR14_CSI2P:
+ fourcc: DRM_FORMAT_SBGGR14
+ mod: MIPI_FORMAT_MOD_CSI2_PACKED
+
- SRGGB10_IPU3:
fourcc: DRM_FORMAT_SRGGB10
mod: IPU3_FORMAT_MOD_PACKED
@@ -157,4 +190,20 @@ formats:
- SBGGR10_IPU3:
fourcc: DRM_FORMAT_SBGGR10
mod: IPU3_FORMAT_MOD_PACKED
+
+ - RGGB_PISP_COMP1:
+ fourcc: DRM_FORMAT_SRGGB16
+ mod: PISP_FORMAT_MOD_COMPRESS_MODE1
+ - GRBG_PISP_COMP1:
+ fourcc: DRM_FORMAT_SGRBG16
+ mod: PISP_FORMAT_MOD_COMPRESS_MODE1
+ - GBRG_PISP_COMP1:
+ fourcc: DRM_FORMAT_SGBRG16
+ mod: PISP_FORMAT_MOD_COMPRESS_MODE1
+ - BGGR_PISP_COMP1:
+ fourcc: DRM_FORMAT_SBGGR16
+ mod: PISP_FORMAT_MOD_COMPRESS_MODE1
+ - MONO_PISP_COMP1:
+ fourcc: DRM_FORMAT_R16
+ mod: PISP_FORMAT_MOD_COMPRESS_MODE1
...
diff --git a/src/libcamera/framebuffer.cpp b/src/libcamera/framebuffer.cpp
index 7be18560..63d679cb 100644
--- a/src/libcamera/framebuffer.cpp
+++ b/src/libcamera/framebuffer.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * framebuffer.cpp - Frame buffer handling
+ * Frame buffer handling
*/
#include <libcamera/framebuffer.h>
@@ -114,9 +114,16 @@ LOG_DEFINE_CATEGORY(Buffer)
* pipeline handlers.
*/
-FrameBuffer::Private::Private()
- : request_(nullptr), isContiguous_(true)
+/**
+ * \brief Construct a FrameBuffer::Private instance
+ * \param[in] planes The frame memory planes
+ * \param[in] cookie Cookie
+ */
+FrameBuffer::Private::Private(const std::vector<Plane> &planes, uint64_t cookie)
+ : planes_(planes), cookie_(cookie), request_(nullptr),
+ isContiguous_(true)
{
+ metadata_.planes_.resize(planes_.size());
}
/**
@@ -195,6 +202,12 @@ FrameBuffer::Private::~Private()
*/
/**
+ * \fn FrameBuffer::Private::metadata()
+ * \brief Retrieve the dynamic metadata
+ * \return Dynamic metadata for the frame contained in the buffer
+ */
+
+/**
* \class FrameBuffer
* \brief Frame buffer data and its associated dynamic metadata
*
@@ -291,29 +304,22 @@ ino_t fileDescriptorInode(const SharedFD &fd)
* \param[in] cookie Cookie
*/
FrameBuffer::FrameBuffer(const std::vector<Plane> &planes, unsigned int cookie)
- : FrameBuffer(std::make_unique<Private>(), planes, cookie)
+ : FrameBuffer(std::make_unique<Private>(planes, cookie))
{
}
/**
- * \brief Construct a FrameBuffer with an extensible private class and an array
- * of planes
+ * \brief Construct a FrameBuffer with an extensible private class
* \param[in] d The extensible private class
- * \param[in] planes The frame memory planes
- * \param[in] cookie Cookie
*/
-FrameBuffer::FrameBuffer(std::unique_ptr<Private> d,
- const std::vector<Plane> &planes,
- unsigned int cookie)
- : Extensible(std::move(d)), planes_(planes), cookie_(cookie)
+FrameBuffer::FrameBuffer(std::unique_ptr<Private> d)
+ : Extensible(std::move(d))
{
- metadata_.planes_.resize(planes_.size());
-
unsigned int offset = 0;
bool isContiguous = true;
ino_t inode = 0;
- for (const auto &plane : planes_) {
+ for (const auto &plane : _d()->planes_) {
ASSERT(plane.offset != Plane::kInvalidOffset);
if (plane.offset != offset) {
@@ -325,9 +331,9 @@ FrameBuffer::FrameBuffer(std::unique_ptr<Private> d,
* Two different dmabuf file descriptors may still refer to the
* same dmabuf instance. Check this using inodes.
*/
- if (plane.fd != planes_[0].fd) {
+ if (plane.fd != _d()->planes_[0].fd) {
if (!inode)
- inode = fileDescriptorInode(planes_[0].fd);
+ inode = fileDescriptorInode(_d()->planes_[0].fd);
if (fileDescriptorInode(plane.fd) != inode) {
isContiguous = false;
break;
@@ -344,10 +350,13 @@ FrameBuffer::FrameBuffer(std::unique_ptr<Private> d,
}
/**
- * \fn FrameBuffer::planes()
* \brief Retrieve the static plane descriptors
* \return Array of plane descriptors
*/
+const std::vector<FrameBuffer::Plane> &FrameBuffer::planes() const
+{
+ return _d()->planes_;
+}
/**
* \brief Retrieve the request this buffer belongs to
@@ -368,13 +377,15 @@ Request *FrameBuffer::request() const
}
/**
- * \fn FrameBuffer::metadata()
* \brief Retrieve the dynamic metadata
* \return Dynamic metadata for the frame contained in the buffer
*/
+const FrameMetadata &FrameBuffer::metadata() const
+{
+ return _d()->metadata_;
+}
/**
- * \fn FrameBuffer::cookie()
* \brief Retrieve the cookie
*
* The cookie belongs to the creator of the FrameBuffer, which controls its
@@ -384,9 +395,12 @@ Request *FrameBuffer::request() const
*
* \return The cookie
*/
+uint64_t FrameBuffer::cookie() const
+{
+ return _d()->cookie_;
+}
/**
- * \fn FrameBuffer::setCookie()
* \brief Set the cookie
* \param[in] cookie Cookie to set
*
@@ -395,6 +409,10 @@ Request *FrameBuffer::request() const
* modify the cookie value of buffers they haven't created themselves. The
* libcamera core never modifies the buffer cookie.
*/
+void FrameBuffer::setCookie(uint64_t cookie)
+{
+ _d()->cookie_ = cookie;
+}
/**
* \brief Extract the Fence associated with this Framebuffer
diff --git a/src/libcamera/framebuffer_allocator.cpp b/src/libcamera/framebuffer_allocator.cpp
index 4df27cac..3d53bde2 100644
--- a/src/libcamera/framebuffer_allocator.cpp
+++ b/src/libcamera/framebuffer_allocator.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * framebuffer_allocator.cpp - FrameBuffer allocator
+ * FrameBuffer allocator
*/
#include <libcamera/framebuffer_allocator.h>
@@ -59,14 +59,11 @@ LOG_DEFINE_CATEGORY(Allocator)
* \param[in] camera The camera
*/
FrameBufferAllocator::FrameBufferAllocator(std::shared_ptr<Camera> camera)
- : camera_(camera)
+ : camera_(std::move(camera))
{
}
-FrameBufferAllocator::~FrameBufferAllocator()
-{
- buffers_.clear();
-}
+FrameBufferAllocator::~FrameBufferAllocator() = default;
/**
* \brief Allocate buffers for a configured stream
@@ -88,16 +85,22 @@ FrameBufferAllocator::~FrameBufferAllocator()
*/
int FrameBufferAllocator::allocate(Stream *stream)
{
- if (buffers_.count(stream)) {
+ const auto &[it, inserted] = buffers_.try_emplace(stream);
+
+ if (!inserted) {
LOG(Allocator, Error) << "Buffers already allocated for stream";
return -EBUSY;
}
- int ret = camera_->exportFrameBuffers(stream, &buffers_[stream]);
+ int ret = camera_->exportFrameBuffers(stream, &it->second);
if (ret == -EINVAL)
LOG(Allocator, Error)
<< "Stream is not part of " << camera_->id()
<< " active configuration";
+
+ if (ret < 0)
+ buffers_.erase(it);
+
return ret;
}
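For reference, the caller-side sequence that the reworked allocate() keeps consistent on failure (illustrative sketch; camera is assumed to be a std::shared_ptr<Camera> and config its validated configuration):

FrameBufferAllocator allocator(camera);

Stream *stream = config->at(0).stream();
int ret = allocator.allocate(stream);
if (ret < 0)
	return ret;

/* The allocator keeps ownership of the buffers until free() is called. */
const std::vector<std::unique_ptr<FrameBuffer>> &buffers = allocator.buffers(stream);

allocator.free(stream);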
@@ -119,8 +122,6 @@ int FrameBufferAllocator::free(Stream *stream)
if (iter == buffers_.end())
return -EINVAL;
- std::vector<std::unique_ptr<FrameBuffer>> &buffers = iter->second;
- buffers.clear();
buffers_.erase(iter);
return 0;
diff --git a/src/libcamera/geometry.cpp b/src/libcamera/geometry.cpp
index e50b46c5..00015136 100644
--- a/src/libcamera/geometry.cpp
+++ b/src/libcamera/geometry.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * geometry.cpp - Geometry-related structures
+ * Geometry-related structures
*/
#include <libcamera/geometry.h>
@@ -95,10 +95,10 @@ std::ostream &operator<<(std::ostream &out, const Point &p)
}
/**
- * \struct Size
+ * \class Size
* \brief Describe a two-dimensional size
*
- * The Size structure defines a two-dimensional size with integer precision.
+ * The Size class defines a two-dimensional size with integer precision.
*/
/**
@@ -455,7 +455,7 @@ std::ostream &operator<<(std::ostream &out, const Size &s)
}
/**
- * \struct SizeRange
+ * \class SizeRange
* \brief Describe a range of sizes
*
* A SizeRange describes a range of sizes included in the [min, max] interval
@@ -589,7 +589,7 @@ std::ostream &operator<<(std::ostream &out, const SizeRange &sr)
}
/**
- * \struct Rectangle
+ * \class Rectangle
* \brief Describe a rectangle's position and dimensions
*
* Rectangles are used to identify an area of an image. They are specified by
diff --git a/src/libcamera/ipa/meson.build b/src/libcamera/ipa/meson.build
index 44695240..ef73b3f9 100644
--- a/src/libcamera/ipa/meson.build
+++ b/src/libcamera/ipa/meson.build
@@ -3,13 +3,10 @@
libcamera_ipa_interfaces = []
foreach file : ipa_mojom_files
- name = '@0@'.format(file).split('/')[-1].split('.')[0]
-
# {pipeline}_ipa_interface.cpp
libcamera_ipa_interfaces += \
- custom_target(name + '_ipa_interface_cpp',
- input : file,
- output : name + '_ipa_interface.cpp',
+ custom_target(input : file,
+ output : '@BASENAME@_ipa_interface.cpp',
command : [
mojom_docs_extractor,
'-o', '@OUTPUT@', '@INPUT@'
diff --git a/src/libcamera/ipa_controls.cpp b/src/libcamera/ipa_controls.cpp
index 870a443b..9420c889 100644
--- a/src/libcamera/ipa_controls.cpp
+++ b/src/libcamera/ipa_controls.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_controls.cpp - IPA control handling
+ * IPA control handling
*/
#include <libcamera/ipa/ipa_controls.h>
diff --git a/src/libcamera/ipa_data_serializer.cpp b/src/libcamera/ipa_data_serializer.cpp
index 0a259305..3e9bef08 100644
--- a/src/libcamera/ipa_data_serializer.cpp
+++ b/src/libcamera/ipa_data_serializer.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * ipa_data_serializer.cpp - Image Processing Algorithm data serializer
+ * Image Processing Algorithm data serializer
*/
#include "libcamera/internal/ipa_data_serializer.h"
diff --git a/src/libcamera/ipa_interface.cpp b/src/libcamera/ipa_interface.cpp
index 8ea6cbee..a9dc54ad 100644
--- a/src/libcamera/ipa_interface.cpp
+++ b/src/libcamera/ipa_interface.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_interface.cpp - Image Processing Algorithm interface
+ * Image Processing Algorithm interface
*/
#include <libcamera/ipa/ipa_interface.h>
diff --git a/src/libcamera/ipa_manager.cpp b/src/libcamera/ipa_manager.cpp
index ec966045..f4e0b633 100644
--- a/src/libcamera/ipa_manager.cpp
+++ b/src/libcamera/ipa_manager.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_manager.cpp - Image Processing Algorithm module manager
+ * Image Processing Algorithm module manager
*/
#include "libcamera/internal/ipa_manager.h"
@@ -109,6 +109,11 @@ IPAManager::IPAManager()
LOG(IPAManager, Fatal)
<< "Multiple IPAManager objects are not allowed";
+#if HAVE_IPA_PUBKEY
+ if (!pubKey_.isValid())
+ LOG(IPAManager, Warning) << "Public key not valid";
+#endif
+
unsigned int ipaCount = 0;
/* User-specified paths take precedence. */
@@ -133,7 +138,7 @@ IPAManager::IPAManager()
std::string root = utils::libcameraBuildPath();
if (!root.empty()) {
std::string ipaBuildPath = root + "src/ipa";
- constexpr int maxDepth = 1;
+ constexpr int maxDepth = 2;
LOG(IPAManager, Info)
<< "libcamera is not installed. Adding '"
@@ -274,6 +279,19 @@ IPAModule *IPAManager::module(PipelineHandler *pipe, uint32_t minVersion,
* found or if the IPA proxy fails to initialize
*/
+#if HAVE_IPA_PUBKEY
+/**
+ * \fn IPAManager::pubKey()
+ * \brief Retrieve the IPA module signing public key
+ *
+ * IPA module signature verification is normally handled internally by the
+ * IPAManager class. This function is meant to be used by utilities that need to
+ * verify signatures externally.
+ *
+ * \return The IPA module signing public key
+ */
+#endif
+
bool IPAManager::isSignatureValid([[maybe_unused]] IPAModule *ipa) const
{
#if HAVE_IPA_PUBKEY
diff --git a/src/libcamera/ipa_module.cpp b/src/libcamera/ipa_module.cpp
index c9ff7de3..0756b691 100644
--- a/src/libcamera/ipa_module.cpp
+++ b/src/libcamera/ipa_module.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_module.cpp - Image Processing Algorithm module
+ * Image Processing Algorithm module
*/
#include "libcamera/internal/ipa_module.h"
@@ -225,9 +225,9 @@ Span<const uint8_t> elfLoadSymbol(Span<const uint8_t> elf, const char *symbol)
* \brief The name of the IPA module
*
* The name may be used to build file system paths to IPA-specific resources.
- * It shall only contain printable characters, and may not contain '/', '*',
- * '?' or '\'. For IPA modules included in libcamera, it shall match the
- * directory of the IPA module in the source tree.
+ * It shall only contain printable characters, and may not contain '*', '?' or
+ * '\'. For IPA modules included in libcamera, it shall match the directory of
+ * the IPA module in the source tree.
*
* \todo Allow user to choose to isolate open source IPAs
*/
@@ -288,25 +288,30 @@ int IPAModule::loadIPAModuleInfo()
}
Span<const uint8_t> info = elfLoadSymbol(data, "ipaModuleInfo");
- if (info.size() != sizeof(info_)) {
+ if (info.size() < sizeof(info_)) {
LOG(IPAModule, Error) << "IPA module has no valid info";
return -EINVAL;
}
- memcpy(&info_, info.data(), info.size());
+ memcpy(&info_, info.data(), sizeof(info_));
if (info_.moduleAPIVersion != IPA_MODULE_API_VERSION) {
LOG(IPAModule, Error) << "IPA module API version mismatch";
return -EINVAL;
}
- /* Validate the IPA module name. */
+ /*
+ * Validate the IPA module name.
+ *
+ * \todo Consider module naming restrictions to avoid escaping from a
+ * base directory. Forbidding ".." may be enough, but this may be best
+ * implemented in a different layer.
+ */
std::string ipaName = info_.name;
auto iter = std::find_if_not(ipaName.begin(), ipaName.end(),
[](unsigned char c) -> bool {
- return isprint(c) && c != '/' &&
- c != '?' && c != '*' &&
- c != '\\';
+ return isprint(c) && c != '?' &&
+ c != '*' && c != '\\';
});
if (iter != ipaName.end()) {
LOG(IPAModule, Error)
diff --git a/src/libcamera/ipa_proxy.cpp b/src/libcamera/ipa_proxy.cpp
index 3f2cc6b8..6c17c456 100644
--- a/src/libcamera/ipa_proxy.cpp
+++ b/src/libcamera/ipa_proxy.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_proxy.cpp - Image Processing Algorithm proxy
+ * Image Processing Algorithm proxy
*/
#include "libcamera/internal/ipa_proxy.h"
diff --git a/src/libcamera/ipa_pub_key.cpp.in b/src/libcamera/ipa_pub_key.cpp.in
index 01e5333b..5d8c92c2 100644
--- a/src/libcamera/ipa_pub_key.cpp.in
+++ b/src/libcamera/ipa_pub_key.cpp.in
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Laurent Pinchart <laurent.pinchart@ideasonboard.com>
*
- * ipa_pub_key.cpp - IPA module signing public key
+ * IPA module signing public key
*
* This file is auto-generated. Do not edit.
*/
diff --git a/src/libcamera/ipc_pipe.cpp b/src/libcamera/ipc_pipe.cpp
index 31a0ca09..548299d0 100644
--- a/src/libcamera/ipc_pipe.cpp
+++ b/src/libcamera/ipc_pipe.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * ipc_pipe.cpp - Image Processing Algorithm IPC module for IPA proxies
+ * Image Processing Algorithm IPC module for IPA proxies
*/
#include "libcamera/internal/ipc_pipe.h"
diff --git a/src/libcamera/ipc_pipe_unixsocket.cpp b/src/libcamera/ipc_pipe_unixsocket.cpp
index da2cffc3..668ec73b 100644
--- a/src/libcamera/ipc_pipe_unixsocket.cpp
+++ b/src/libcamera/ipc_pipe_unixsocket.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * ipc_pipe_unixsocket.cpp - Image Processing Algorithm IPC module using unix socket
+ * Image Processing Algorithm IPC module using unix socket
*/
#include "libcamera/internal/ipc_pipe_unixsocket.h"
diff --git a/src/libcamera/ipc_unixsocket.cpp b/src/libcamera/ipc_unixsocket.cpp
index 1980d374..75285b67 100644
--- a/src/libcamera/ipc_unixsocket.cpp
+++ b/src/libcamera/ipc_unixsocket.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipc_unixsocket.cpp - IPC mechanism based on Unix sockets
+ * IPC mechanism based on Unix sockets
*/
#include "libcamera/internal/ipc_unixsocket.h"
diff --git a/src/libcamera/mapped_framebuffer.cpp b/src/libcamera/mapped_framebuffer.cpp
index 6860069b..b3104e05 100644
--- a/src/libcamera/mapped_framebuffer.cpp
+++ b/src/libcamera/mapped_framebuffer.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * mapped_framebuffer.cpp - Mapped Framebuffer support
+ * Mapped Framebuffer support
*/
#include "libcamera/internal/mapped_framebuffer.h"
diff --git a/src/libcamera/media_device.cpp b/src/libcamera/media_device.cpp
index 7c94da9e..bd054552 100644
--- a/src/libcamera/media_device.cpp
+++ b/src/libcamera/media_device.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * media_device.cpp - Media device handler
+ * Media device handler
*/
#include "libcamera/internal/media_device.h"
@@ -352,8 +352,9 @@ MediaEntity *MediaDevice::getEntityByName(const std::string &name) const
* entity with name \a sourceName, to the pad at index \a sinkIdx of the
* sink entity with name \a sinkName, if any.
*
- * \sa MediaDevice::link(const MediaEntity *source, unsigned int sourceIdx, const MediaEntity *sink, unsigned int sinkIdx) const
- * \sa MediaDevice::link(const MediaPad *source, const MediaPad *sink) const
+ * \sa link(const MediaEntity *source, unsigned int sourceIdx,
+ * const MediaEntity *sink, unsigned int sinkIdx)
+ * \sa link(const MediaPad *source, const MediaPad *sink)
*
* \return The link that connects the two pads, or nullptr if no such a link
* exists
@@ -381,8 +382,9 @@ MediaLink *MediaDevice::link(const std::string &sourceName, unsigned int sourceI
* entity \a source, to the pad at index \a sinkIdx of the sink entity \a
* sink, if any.
*
- * \sa MediaDevice::link(const std::string &sourceName, unsigned int sourceIdx, const std::string &sinkName, unsigned int sinkIdx) const
- * \sa MediaDevice::link(const MediaPad *source, const MediaPad *sink) const
+ * \sa link(const std::string &sourceName, unsigned int sourceIdx,
+ * const std::string &sinkName, unsigned int sinkIdx)
+ * \sa link(const MediaPad *source, const MediaPad *sink)
*
* \return The link that connects the two pads, or nullptr if no such a link
* exists
@@ -404,8 +406,10 @@ MediaLink *MediaDevice::link(const MediaEntity *source, unsigned int sourceIdx,
* \param[in] source The source pad
* \param[in] sink The sink pad
*
- * \sa MediaDevice::link(const std::string &sourceName, unsigned int sourceIdx, const std::string &sinkName, unsigned int sinkIdx) const
- * \sa MediaDevice::link(const MediaEntity *source, unsigned int sourceIdx, const MediaEntity *sink, unsigned int sinkIdx) const
+ * \sa link(const std::string &sourceName, unsigned int sourceIdx,
+ * const std::string &sinkName, unsigned int sinkIdx)
+ * \sa link(const MediaEntity *source, unsigned int sourceIdx,
+ * const MediaEntity *sink, unsigned int sinkIdx)
*
* \return The link that connects the two pads, or nullptr if no such a link
* exists
@@ -473,7 +477,7 @@ int MediaDevice::open()
return -EBUSY;
}
- fd_ = UniqueFD(::open(deviceNode_.c_str(), O_RDWR));
+ fd_ = UniqueFD(::open(deviceNode_.c_str(), O_RDWR | O_CLOEXEC));
if (!fd_.isValid()) {
int ret = -errno;
LOG(MediaDevice, Error)
diff --git a/src/libcamera/media_object.cpp b/src/libcamera/media_object.cpp
index c78f4758..1b191a1e 100644
--- a/src/libcamera/media_object.cpp
+++ b/src/libcamera/media_object.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * media_object.cpp - Media device objects: entities, pads and links
+ * Media device objects: entities, pads and links
*/
#include "libcamera/internal/media_object.h"
diff --git a/src/libcamera/meson.build b/src/libcamera/meson.build
index b57bee7e..a3b12bc1 100644
--- a/src/libcamera/meson.build
+++ b/src/libcamera/meson.build
@@ -7,15 +7,15 @@ libcamera_sources = files([
'camera_controls.cpp',
'camera_lens.cpp',
'camera_manager.cpp',
- 'camera_sensor.cpp',
- 'camera_sensor_properties.cpp',
'color_space.cpp',
'controls.cpp',
'control_serializer.cpp',
'control_validator.cpp',
+ 'converter.cpp',
'delayed_controls.cpp',
'device_enumerator.cpp',
'device_enumerator_sysfs.cpp',
+ 'dma_heaps.cpp',
'fence.cpp',
'formats.cpp',
'framebuffer.cpp',
@@ -33,11 +33,13 @@ libcamera_sources = files([
'mapped_framebuffer.cpp',
'media_device.cpp',
'media_object.cpp',
+ 'orientation.cpp',
'pipeline_handler.cpp',
'pixel_format.cpp',
'process.cpp',
'pub_key.cpp',
'request.cpp',
+ 'shared_mem_object.cpp',
'source_paths.cpp',
'stream.cpp',
'sysfs.cpp',
@@ -57,20 +59,46 @@ includes = [
libcamera_includes,
]
+libcamera_deps = []
+
libatomic = cc.find_library('atomic', required : false)
+libthreads = dependency('threads')
subdir('base')
+subdir('converter')
subdir('ipa')
subdir('pipeline')
subdir('proxy')
+subdir('sensor')
+subdir('software_isp')
+
+null_dep = dependency('', required : false)
-libdl = cc.find_library('dl')
-libgnutls = cc.find_library('gnutls', required : true)
-libudev = dependency('libudev', required : false)
+# TODO: Use dependency('dl') when updating to meson 0.62.0 or newer.
+libdl = null_dep
+if not cc.has_function('dlopen')
+ libdl = cc.find_library('dl')
+endif
+libudev = dependency('libudev', required : get_option('udev'))
libyaml = dependency('yaml-0.1', required : false)
-if libgnutls.found()
+# Use one of gnutls or libcrypto (provided by OpenSSL), trying gnutls first.
+libcrypto = dependency('gnutls', required : false)
+if libcrypto.found()
config_h.set('HAVE_GNUTLS', 1)
+else
+ libcrypto = dependency('libcrypto', required : false)
+ if libcrypto.found()
+ config_h.set('HAVE_CRYPTO', 1)
+ endif
+endif
+
+if not libcrypto.found()
+ warning('Neither gnutls nor libcrypto found, all IPA modules will be isolated')
+ summary({'IPA modules signed with': 'None (modules will run isolated)'},
+ section : 'Configuration')
+else
+ summary({'IPA modules signed with' : libcrypto.name()}, section : 'Configuration')
endif
if liblttng.found()
@@ -101,12 +129,27 @@ endif
control_sources = []
-foreach source : control_source_files
- input_files = files(source +'.yaml', source + '.cpp.in')
- control_sources += custom_target(source + '_cpp',
+controls_mode_files = {
+ 'controls' : controls_files,
+ 'properties' : properties_files,
+}
+
+foreach mode, input_files : controls_mode_files
+ input_files = files(input_files)
+
+ if mode == 'controls'
+ template_file = files('control_ids.cpp.in')
+ else
+ template_file = files('property_ids.cpp.in')
+ endif
+
+ ranges_file = files('control_ranges.yaml')
+ control_sources += custom_target(mode + '_cpp',
input : input_files,
- output : source + '.cpp',
- command : [gen_controls, '-o', '@OUTPUT@', '@INPUT@'])
+ output : mode + '_ids.cpp',
+ command : [gen_controls, '-o', '@OUTPUT@',
+ '--mode', mode, '-t', template_file,
+ '-r', ranges_file, '@INPUT@'])
endforeach
libcamera_sources += control_sources
@@ -131,12 +174,12 @@ if ipa_sign_module
libcamera_sources += ipa_pub_key_cpp
endif
-libcamera_deps = [
+libcamera_deps += [
libatomic,
libcamera_base,
libcamera_base_private,
+ libcrypto,
libdl,
- libgnutls,
liblttng,
libudev,
libyaml,
@@ -150,6 +193,7 @@ libcamera_deps = [
libcamera = shared_library('libcamera',
libcamera_sources,
version : libcamera_version,
+ soversion : libcamera_soversion,
name_prefix : '',
install : true,
include_directories : includes,
@@ -179,4 +223,6 @@ pkg_mod.generate(libcamera,
description : 'Complex Camera Support Library',
subdirs : 'libcamera')
+meson.override_dependency('libcamera', libcamera_public)
+
subdir('proxy/worker')
diff --git a/src/libcamera/orientation.cpp b/src/libcamera/orientation.cpp
new file mode 100644
index 00000000..47fd6a32
--- /dev/null
+++ b/src/libcamera/orientation.cpp
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Ideas On Board Oy
+ *
+ * Image orientation
+ */
+
+#include <libcamera/orientation.h>
+
+#include <array>
+#include <string>
+
+/**
+ * \file libcamera/orientation.h
+ * \brief Image orientation definition
+ */
+
+namespace libcamera {
+
+/**
+ * \enum Orientation
+ * \brief The image orientation in a memory buffer
+ *
+ * The Orientation enumeration describes the orientation of the images
+ * produced by the camera pipeline as they get received by the application
+ * inside memory buffers.
+ *
+ * The image orientation expressed by the Orientation enumeration can then be
+ * inferred by applying to a naturally oriented image a rotation by a multiple
+ * of 90 degrees in the clockwise direction, followed by an optional horizontal
+ * mirroring.
+ *
+ * The enumeration numerical values follow the ones defined by the EXIF
+ * Specification version 2.32, Tag 274 "Orientation", while the names of the
+ * enumerated values report the rotation and mirroring operations performed.
+ *
+ * For example, Orientation::Rotate90Mirror describes the orientation obtained
+ * by rotating the image 90 degrees clockwise first and then applying a
+ * horizontal mirroring.
+ *
+ * \var Orientation::Rotate0
+ * \image html rotation/rotate0.svg
+ * \var Orientation::Rotate0Mirror
+ * \image html rotation/rotate0Mirror.svg
+ * \var Orientation::Rotate180
+ * \image html rotation/rotate180.svg
+ * \var Orientation::Rotate180Mirror
+ * \image html rotation/rotate180Mirror.svg
+ * \var Orientation::Rotate90Mirror
+ * \image html rotation/rotate90Mirror.svg
+ * \var Orientation::Rotate270
+ * \image html rotation/rotate270.svg
+ * \var Orientation::Rotate270Mirror
+ * \image html rotation/rotate270Mirror.svg
+ * \var Orientation::Rotate90
+ * \image html rotation/rotate90.svg
+ */
+
+/**
+ * \brief Return the orientation representing a rotation of the given angle
+ * clockwise
+ * \param[in] angle The angle of rotation in a clockwise sense. Negative values
+ * can be used to represent anticlockwise rotations
+ * \param[out] success Set to `true` if the angle is a multiple of 90 degrees,
+ * otherwise `false`
+ * \return The orientation corresponding to the rotation if \a success was set
+ * to `true`, otherwise the `Rotate0` orientation
+ */
+Orientation orientationFromRotation(int angle, bool *success)
+{
+ angle = angle % 360;
+ if (angle < 0)
+ angle += 360;
+
+ if (success != nullptr)
+ *success = true;
+
+ switch (angle) {
+ case 0:
+ return Orientation::Rotate0;
+ case 90:
+ return Orientation::Rotate90;
+ case 180:
+ return Orientation::Rotate180;
+ case 270:
+ return Orientation::Rotate270;
+ }
+
+ if (success != nullptr)
+ *success = false;
+
+ return Orientation::Rotate0;
+}
+
+/**
+ * \brief Prints human-friendly names for Orientation items
+ * \param[in] out The output stream
+ * \param[in] orientation The Orientation item
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const Orientation &orientation)
+{
+ constexpr std::array<const char *, 9> orientationNames = {
+ "", /* Orientation starts counting from 1. */
+ "Rotate0", "Rotate0Mirror",
+ "Rotate180", "Rotate180Mirror",
+ "Rotate90Mirror", "Rotate270",
+ "Rotate270Mirror", "Rotate90",
+ };
+
+ out << orientationNames[static_cast<unsigned int>(orientation)];
+ return out;
+}
+
+} /* namespace libcamera */
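A short usage sketch of the helpers added above (illustrative only): converting a rotation property into an Orientation and printing it.

#include <iostream>

#include <libcamera/orientation.h>

using namespace libcamera;

int main()
{
	bool ok;

	/* -90 degrees normalises to a 270 degree clockwise rotation. */
	Orientation orientation = orientationFromRotation(-90, &ok);
	if (!ok)
		orientation = Orientation::Rotate0;

	std::cout << orientation << std::endl;	/* Prints "Rotate270". */

	return 0;
}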
diff --git a/src/libcamera/pipeline/imx8-isi/imx8-isi.cpp b/src/libcamera/pipeline/imx8-isi/imx8-isi.cpp
new file mode 100644
index 00000000..72aa6c75
--- /dev/null
+++ b/src/libcamera/pipeline/imx8-isi/imx8-isi.cpp
@@ -0,0 +1,1117 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022 - Jacopo Mondi <jacopo@jmondi.org>
+ *
+ * Pipeline handler for ISI interface found on NXP i.MX8 SoC
+ */
+
+#include <algorithm>
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/camera_manager.h>
+#include <libcamera/formats.h>
+#include <libcamera/geometry.h>
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/v4l2_subdevice.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+
+#include "linux/media-bus-format.h"
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(ISI)
+
+class PipelineHandlerISI;
+
+class ISICameraData : public Camera::Private
+{
+public:
+ ISICameraData(PipelineHandler *ph)
+ : Camera::Private(ph)
+ {
+ /*
+ * \todo Assume 2 channels only for now, as that's the number of
+ * available channels on i.MX8MP.
+ */
+ streams_.resize(2);
+ }
+
+ PipelineHandlerISI *pipe();
+
+ int init();
+
+ unsigned int pipeIndex(const Stream *stream)
+ {
+ return stream - &*streams_.begin();
+ }
+
+ unsigned int getRawMediaBusFormat(PixelFormat *pixelFormat) const;
+ unsigned int getYuvMediaBusFormat(const PixelFormat &pixelFormat) const;
+ unsigned int getMediaBusFormat(PixelFormat *pixelFormat) const;
+
+ std::unique_ptr<CameraSensor> sensor_;
+ std::unique_ptr<V4L2Subdevice> csis_;
+
+ std::vector<Stream> streams_;
+
+ std::vector<Stream *> enabledStreams_;
+
+ unsigned int xbarSink_;
+};
+
+class ISICameraConfiguration : public CameraConfiguration
+{
+public:
+ ISICameraConfiguration(ISICameraData *data)
+ : data_(data)
+ {
+ }
+
+ Status validate() override;
+
+ static const std::map<PixelFormat, unsigned int> formatsMap_;
+
+ V4L2SubdeviceFormat sensorFormat_;
+
+private:
+ CameraConfiguration::Status
+ validateRaw(std::set<Stream *> &availableStreams, const Size &maxResolution);
+ CameraConfiguration::Status
+ validateYuv(std::set<Stream *> &availableStreams, const Size &maxResolution);
+
+ const ISICameraData *data_;
+};
+
+class PipelineHandlerISI : public PipelineHandler
+{
+public:
+ PipelineHandlerISI(CameraManager *manager);
+
+ bool match(DeviceEnumerator *enumerator) override;
+
+ std::unique_ptr<CameraConfiguration>
+ generateConfiguration(Camera *camera, Span<const StreamRole> roles) override;
+ int configure(Camera *camera, CameraConfiguration *config) override;
+
+ int exportFrameBuffers(Camera *camera, Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
+
+ int start(Camera *camera, const ControlList *controls) override;
+
+protected:
+ void stopDevice(Camera *camera) override;
+
+ int queueRequestDevice(Camera *camera, Request *request) override;
+
+private:
+ static constexpr Size kPreviewSize = { 1920, 1080 };
+ static constexpr Size kMinISISize = { 1, 1 };
+
+ struct Pipe {
+ std::unique_ptr<V4L2Subdevice> isi;
+ std::unique_ptr<V4L2VideoDevice> capture;
+ };
+
+ ISICameraData *cameraData(Camera *camera)
+ {
+ return static_cast<ISICameraData *>(camera->_d());
+ }
+
+ Pipe *pipeFromStream(Camera *camera, const Stream *stream);
+
+ StreamConfiguration generateYUVConfiguration(Camera *camera,
+ const Size &size);
+ StreamConfiguration generateRawConfiguration(Camera *camera);
+
+ void bufferReady(FrameBuffer *buffer);
+
+ MediaDevice *isiDev_;
+
+ std::unique_ptr<V4L2Subdevice> crossbar_;
+ std::vector<Pipe> pipes_;
+};
+
+/* -----------------------------------------------------------------------------
+ * Camera Data
+ */
+
+PipelineHandlerISI *ISICameraData::pipe()
+{
+ return static_cast<PipelineHandlerISI *>(Camera::Private::pipe());
+}
+
+/* Open and initialize pipe components. */
+int ISICameraData::init()
+{
+ int ret = sensor_->init();
+ if (ret)
+ return ret;
+
+ ret = csis_->open();
+ if (ret)
+ return ret;
+
+ properties_ = sensor_->properties();
+
+ return 0;
+}
+
+/*
+ * Get a RAW Bayer media bus format compatible with the requested pixelFormat.
+ *
+ * If the requested pixelFormat cannot be produced by the sensor, adjust it to
+ * the one corresponding to the media bus format with the largest bit-depth.
+ */
+unsigned int ISICameraData::getRawMediaBusFormat(PixelFormat *pixelFormat) const
+{
+ std::vector<unsigned int> mbusCodes = sensor_->mbusCodes();
+
+ static const std::map<PixelFormat, unsigned int> rawFormats = {
+ { formats::SBGGR8, MEDIA_BUS_FMT_SBGGR8_1X8 },
+ { formats::SGBRG8, MEDIA_BUS_FMT_SGBRG8_1X8 },
+ { formats::SGRBG8, MEDIA_BUS_FMT_SGRBG8_1X8 },
+ { formats::SRGGB8, MEDIA_BUS_FMT_SRGGB8_1X8 },
+ { formats::SBGGR10, MEDIA_BUS_FMT_SBGGR10_1X10 },
+ { formats::SGBRG10, MEDIA_BUS_FMT_SGBRG10_1X10 },
+ { formats::SGRBG10, MEDIA_BUS_FMT_SGRBG10_1X10 },
+ { formats::SRGGB10, MEDIA_BUS_FMT_SRGGB10_1X10 },
+ { formats::SBGGR12, MEDIA_BUS_FMT_SBGGR12_1X12 },
+ { formats::SGBRG12, MEDIA_BUS_FMT_SGBRG12_1X12 },
+ { formats::SGRBG12, MEDIA_BUS_FMT_SGRBG12_1X12 },
+ { formats::SRGGB12, MEDIA_BUS_FMT_SRGGB12_1X12 },
+ { formats::SBGGR14, MEDIA_BUS_FMT_SBGGR14_1X14 },
+ { formats::SGBRG14, MEDIA_BUS_FMT_SGBRG14_1X14 },
+ { formats::SGRBG14, MEDIA_BUS_FMT_SGRBG14_1X14 },
+ { formats::SRGGB14, MEDIA_BUS_FMT_SRGGB14_1X14 },
+ };
+
+ /*
+ * Make sure the requested PixelFormat is supported in the above
+ * map and the sensor can produce the compatible mbus code.
+ */
+ auto it = rawFormats.find(*pixelFormat);
+ if (it != rawFormats.end() &&
+ std::count(mbusCodes.begin(), mbusCodes.end(), it->second))
+ return it->second;
+
+ if (it == rawFormats.end())
+ LOG(ISI, Warning) << pixelFormat
+ << " not supported in ISI formats map.";
+
+ /*
+ * The desired pixel format cannot be produced. Adjust it to the one
+ * corresponding to the raw media bus format with the largest bit-depth
+ * the sensor provides.
+ */
+ unsigned int sensorCode = 0;
+ unsigned int maxDepth = 0;
+ *pixelFormat = {};
+
+ for (unsigned int code : mbusCodes) {
+ /* Make sure the media bus format is RAW Bayer. */
+ const BayerFormat &bayerFormat = BayerFormat::fromMbusCode(code);
+ if (!bayerFormat.isValid())
+ continue;
+
+ /* Make sure the media format is supported. */
+ it = std::find_if(rawFormats.begin(), rawFormats.end(),
+ [code](auto &rawFormat) {
+ return rawFormat.second == code;
+ });
+
+ if (it == rawFormats.end()) {
+ LOG(ISI, Warning) << bayerFormat
+ << " not supported in ISI formats map.";
+ continue;
+ }
+
+ /* Pick the one with the largest bit depth. */
+ if (bayerFormat.bitDepth > maxDepth) {
+ maxDepth = bayerFormat.bitDepth;
+ *pixelFormat = it->first;
+ sensorCode = code;
+ }
+ }
+
+ if (!pixelFormat->isValid())
+ LOG(ISI, Error) << "Cannot find a supported RAW format";
+
+ return sensorCode;
+}
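As an illustration with a hypothetical sensor exposing only SRGGB10_1X10 and SRGGB12_1X12: a request for formats::SRGGB16 is not found in rawFormats, so the fallback loop above selects the deepest supported Bayer code, adjusting *pixelFormat to formats::SRGGB12 and returning MEDIA_BUS_FMT_SRGGB12_1X12.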
+
+/*
+ * Get a YUV/RGB media bus format from which the ISI can produce a processed
+ * stream, preferring codes with the same colour encoding as the requested
+ * pixelformat.
+ *
+ * If the sensor does not provide any YUV/RGB media bus format, the ISI cannot
+ * generate any processed pixel format as it cannot debayer.
+ */
+unsigned int ISICameraData::getYuvMediaBusFormat(const PixelFormat &pixelFormat) const
+{
+ std::vector<unsigned int> mbusCodes = sensor_->mbusCodes();
+
+ /*
+ * The ISI can produce YUV/RGB pixel formats from any non-RAW Bayer
+	 * media bus format.
+ *
+ * Keep the list in sync with the mxc_isi_bus_formats[] array in
+ * the ISI driver.
+ */
+ std::vector<unsigned int> yuvCodes = {
+ MEDIA_BUS_FMT_UYVY8_1X16,
+ MEDIA_BUS_FMT_YUV8_1X24,
+ MEDIA_BUS_FMT_RGB565_1X16,
+ MEDIA_BUS_FMT_RGB888_1X24,
+ };
+
+ std::sort(mbusCodes.begin(), mbusCodes.end());
+ std::sort(yuvCodes.begin(), yuvCodes.end());
+
+ std::vector<unsigned int> supportedCodes;
+ std::set_intersection(mbusCodes.begin(), mbusCodes.end(),
+ yuvCodes.begin(), yuvCodes.end(),
+ std::back_inserter(supportedCodes));
+
+ if (supportedCodes.empty()) {
+ LOG(ISI, Warning) << "Cannot find a supported YUV/RGB format";
+
+ return 0;
+ }
+
+ /* Prefer codes with the same encoding as the requested pixel format. */
+ const PixelFormatInfo &info = PixelFormatInfo::info(pixelFormat);
+ for (unsigned int code : supportedCodes) {
+ if (info.colourEncoding == PixelFormatInfo::ColourEncodingYUV &&
+ (code == MEDIA_BUS_FMT_UYVY8_1X16 ||
+ code == MEDIA_BUS_FMT_YUV8_1X24))
+ return code;
+
+ if (info.colourEncoding == PixelFormatInfo::ColourEncodingRGB &&
+ (code == MEDIA_BUS_FMT_RGB565_1X16 ||
+ code == MEDIA_BUS_FMT_RGB888_1X24))
+ return code;
+ }
+
+ /* Otherwise return the first found code. */
+ return supportedCodes[0];
+}
+
+unsigned int ISICameraData::getMediaBusFormat(PixelFormat *pixelFormat) const
+{
+ if (PixelFormatInfo::info(*pixelFormat).colourEncoding ==
+ PixelFormatInfo::ColourEncodingRAW)
+ return getRawMediaBusFormat(pixelFormat);
+
+ return getYuvMediaBusFormat(*pixelFormat);
+}
+
+/* -----------------------------------------------------------------------------
+ * Camera Configuration
+ */
+
+/*
+ * ISICameraConfiguration::formatsMap_ records the association between an output
+ * pixel format and the ISI source pixel format to be applied to the pipeline.
+ */
+const std::map<PixelFormat, unsigned int> ISICameraConfiguration::formatsMap_ = {
+ { formats::YUYV, MEDIA_BUS_FMT_YUV8_1X24 },
+ { formats::AVUY8888, MEDIA_BUS_FMT_YUV8_1X24 },
+ { formats::NV12, MEDIA_BUS_FMT_YUV8_1X24 },
+ { formats::NV16, MEDIA_BUS_FMT_YUV8_1X24 },
+ { formats::YUV444, MEDIA_BUS_FMT_YUV8_1X24 },
+ { formats::RGB565, MEDIA_BUS_FMT_RGB888_1X24 },
+ { formats::BGR888, MEDIA_BUS_FMT_RGB888_1X24 },
+ { formats::RGB888, MEDIA_BUS_FMT_RGB888_1X24 },
+ { formats::XRGB8888, MEDIA_BUS_FMT_RGB888_1X24 },
+ { formats::ABGR8888, MEDIA_BUS_FMT_RGB888_1X24 },
+ { formats::SBGGR8, MEDIA_BUS_FMT_SBGGR8_1X8 },
+ { formats::SGBRG8, MEDIA_BUS_FMT_SGBRG8_1X8 },
+ { formats::SGRBG8, MEDIA_BUS_FMT_SGRBG8_1X8 },
+ { formats::SRGGB8, MEDIA_BUS_FMT_SRGGB8_1X8 },
+ { formats::SBGGR10, MEDIA_BUS_FMT_SBGGR10_1X10 },
+ { formats::SGBRG10, MEDIA_BUS_FMT_SGBRG10_1X10 },
+ { formats::SGRBG10, MEDIA_BUS_FMT_SGRBG10_1X10 },
+ { formats::SRGGB10, MEDIA_BUS_FMT_SRGGB10_1X10 },
+ { formats::SBGGR12, MEDIA_BUS_FMT_SBGGR12_1X12 },
+ { formats::SGBRG12, MEDIA_BUS_FMT_SGBRG12_1X12 },
+ { formats::SGRBG12, MEDIA_BUS_FMT_SGRBG12_1X12 },
+ { formats::SRGGB12, MEDIA_BUS_FMT_SRGGB12_1X12 },
+};
+
+/*
+ * Adjust stream configuration when the first requested stream is RAW: all the
+ * streams will have the same RAW pixelformat and size.
+ */
+CameraConfiguration::Status
+ISICameraConfiguration::validateRaw(std::set<Stream *> &availableStreams,
+ const Size &maxResolution)
+{
+ CameraConfiguration::Status status = Valid;
+
+ /*
+ * Make sure the requested RAW format is supported by the
+ * pipeline, otherwise adjust it.
+ */
+ std::vector<unsigned int> mbusCodes = data_->sensor_->mbusCodes();
+ StreamConfiguration &rawConfig = config_[0];
+ PixelFormat rawFormat = rawConfig.pixelFormat;
+
+ unsigned int sensorCode = data_->getRawMediaBusFormat(&rawFormat);
+ if (!sensorCode) {
+ LOG(ISI, Error) << "Cannot adjust RAW pixelformat "
+ << rawConfig.pixelFormat;
+ return Invalid;
+ }
+
+ if (rawFormat != rawConfig.pixelFormat) {
+ LOG(ISI, Debug) << "RAW pixelformat adjusted to "
+ << rawFormat;
+ rawConfig.pixelFormat = rawFormat;
+ status = Adjusted;
+ }
+
+ /* Cap the RAW stream size to the maximum resolution. */
+ const Size configSize = rawConfig.size;
+ rawConfig.size.boundTo(maxResolution);
+ if (rawConfig.size != configSize) {
+ LOG(ISI, Debug) << "RAW size adjusted to "
+ << rawConfig.size;
+ status = Adjusted;
+ }
+
+ /* Adjust all other streams to RAW. */
+ for (const auto &[i, cfg] : utils::enumerate(config_)) {
+
+ LOG(ISI, Debug) << "Stream " << i << ": " << cfg.toString();
+ const PixelFormat pixFmt = cfg.pixelFormat;
+ const Size size = cfg.size;
+
+ cfg.pixelFormat = rawConfig.pixelFormat;
+ cfg.size = rawConfig.size;
+
+ if (cfg.pixelFormat != pixFmt || cfg.size != size) {
+ LOG(ISI, Debug) << "Stream " << i << " adjusted to "
+ << cfg.toString();
+ status = Adjusted;
+ }
+
+ const PixelFormatInfo &info = PixelFormatInfo::info(cfg.pixelFormat);
+ cfg.stride = info.stride(cfg.size.width, 0);
+ cfg.frameSize = info.frameSize(cfg.size, info.bitsPerPixel);
+
+ /* Assign streams in the order they are presented. */
+ auto stream = availableStreams.extract(availableStreams.begin());
+ cfg.setStream(stream.value());
+ }
+
+ return status;
+}
+
+/*
+ * Adjust stream configuration when the first requested stream is not RAW: all
+ * the streams will be either YUV or RGB processed formats.
+ */
+CameraConfiguration::Status
+ISICameraConfiguration::validateYuv(std::set<Stream *> &availableStreams,
+ const Size &maxResolution)
+{
+ CameraConfiguration::Status status = Valid;
+
+ StreamConfiguration &yuvConfig = config_[0];
+ PixelFormat yuvPixelFormat = yuvConfig.pixelFormat;
+
+ /*
+ * Make sure the sensor can produce a compatible YUV/RGB media bus
+ * format. If the sensor can only produce RAW Bayer we can only fail
+ * here as we can't adjust to anything but RAW.
+ */
+ unsigned int yuvMediaBusCode = data_->getYuvMediaBusFormat(yuvPixelFormat);
+ if (!yuvMediaBusCode) {
+ LOG(ISI, Error) << "Cannot adjust pixelformat "
+ << yuvConfig.pixelFormat;
+ return Invalid;
+ }
+
+ /* Adjust all the other streams. */
+ for (const auto &[i, cfg] : utils::enumerate(config_)) {
+
+ LOG(ISI, Debug) << "Stream " << i << ": " << cfg.toString();
+
+ /* If the stream is RAW or not supported default it to YUYV. */
+ const PixelFormatInfo &cfgInfo = PixelFormatInfo::info(cfg.pixelFormat);
+ if (cfgInfo.colourEncoding == PixelFormatInfo::ColourEncodingRAW ||
+ !formatsMap_.count(cfg.pixelFormat)) {
+
+ LOG(ISI, Debug) << "Stream " << i << " format: "
+ << cfg.pixelFormat << " adjusted to YUYV";
+
+ cfg.pixelFormat = formats::YUYV;
+ status = Adjusted;
+ }
+
+ /* Cap the streams size to the maximum accepted resolution. */
+ Size configSize = cfg.size;
+ cfg.size.boundTo(maxResolution);
+ if (cfg.size != configSize) {
+ LOG(ISI, Debug)
+ << "Stream " << i << " adjusted to " << cfg.size;
+ status = Adjusted;
+ }
+
+ /* Re-fetch the pixel format info in case it has been adjusted. */
+ const PixelFormatInfo &info = PixelFormatInfo::info(cfg.pixelFormat);
+
+ /* \todo Multiplane ? */
+ cfg.stride = info.stride(cfg.size.width, 0);
+ cfg.frameSize = info.frameSize(cfg.size, info.bitsPerPixel);
+
+ /* Assign streams in the order they are presented. */
+ auto stream = availableStreams.extract(availableStreams.begin());
+ cfg.setStream(stream.value());
+ }
+
+ return status;
+}
+
+CameraConfiguration::Status ISICameraConfiguration::validate()
+{
+ Status status = Valid;
+
+ std::set<Stream *> availableStreams;
+ std::transform(data_->streams_.begin(), data_->streams_.end(),
+ std::inserter(availableStreams, availableStreams.end()),
+ [](const Stream &s) { return const_cast<Stream *>(&s); });
+
+ if (config_.empty())
+ return Invalid;
+
+ /* Cap the number of streams to the number of available ISI pipes. */
+ if (config_.size() > availableStreams.size()) {
+ config_.resize(availableStreams.size());
+ status = Adjusted;
+ }
+
+ /*
+ * If more than a single stream is requested, the maximum allowed input
+ * image width is 2048. Cap the maximum image size accordingly.
+ *
+ * \todo The (size > 1) check only applies to i.MX8MP which has 2 ISI
+ * channels. SoCs with more channels than the i.MX8MP are capable of
+ * supporting more streams with input width > 2048 by chaining
+ * successive channels together. Define a policy for channels allocation
+ * to fully support other SoCs.
+ */
+ CameraSensor *sensor = data_->sensor_.get();
+ Size maxResolution = sensor->resolution();
+ if (config_.size() > 1)
+ maxResolution.width = std::min(2048U, maxResolution.width);
+
+ /* Validate streams according to the format of the first one. */
+ const PixelFormatInfo info = PixelFormatInfo::info(config_[0].pixelFormat);
+
+ Status validationStatus;
+ if (info.colourEncoding == PixelFormatInfo::ColourEncodingRAW)
+ validationStatus = validateRaw(availableStreams, maxResolution);
+ else
+ validationStatus = validateYuv(availableStreams, maxResolution);
+
+ if (validationStatus == Invalid)
+ return Invalid;
+
+ if (validationStatus == Adjusted)
+ status = Adjusted;
+
+ /*
+ * Sensor format selection policy: the first stream selects the media
+ * bus code to use, the largest stream selects the size.
+ *
+ * \todo The sensor format selection policy could be changed to
+ * prefer operating the sensor at full resolution to prioritize
+	 * image quality in exchange for a usually slower frame rate.
+	 * Usage of the STILL_CAPTURE role could be considered for this.
+ */
+ Size maxSize;
+ for (const auto &cfg : config_) {
+ if (cfg.size > maxSize)
+ maxSize = cfg.size;
+ }
+
+ PixelFormat pixelFormat = config_[0].pixelFormat;
+
+ V4L2SubdeviceFormat sensorFormat{};
+ sensorFormat.code = data_->getMediaBusFormat(&pixelFormat);
+ sensorFormat.size = maxSize;
+
+ LOG(ISI, Debug) << "Computed sensor configuration: " << sensorFormat;
+
+ /*
+ * We can't use CameraSensor::getFormat() as it might return a
+ * format larger than our strict width limit, as that function
+ * prioritizes formats with the same aspect ratio over formats with less
+ * difference in size.
+ *
+	 * Manually walk all the sizes the sensor supports, searching for the
+	 * smallest one larger than the requested size, without considering the
+	 * aspect ratio as the ISI can freely scale.
+ */
+ auto sizes = sensor->sizes(sensorFormat.code);
+ Size bestSize;
+
+ for (const Size &s : sizes) {
+ /* Ignore smaller sizes. */
+ if (s.width < sensorFormat.size.width ||
+ s.height < sensorFormat.size.height)
+ continue;
+
+ /* Make sure the width stays in the limits. */
+ if (s.width > maxResolution.width)
+ continue;
+
+ bestSize = s;
+ break;
+ }
+
+ /*
+ * This should happen only if the sensor can only produce formats that
+ * exceed the maximum allowed input width.
+ */
+ if (bestSize.isNull()) {
+ LOG(ISI, Error) << "Unable to find a suitable sensor format";
+ return Invalid;
+ }
+
+ sensorFormat_.code = sensorFormat.code;
+ sensorFormat_.size = bestSize;
+
+ LOG(ISI, Debug) << "Selected sensor format: " << sensorFormat_;
+
+ return status;
+}
+
+/* -----------------------------------------------------------------------------
+ * Pipeline Handler
+ */
+
+PipelineHandlerISI::PipelineHandlerISI(CameraManager *manager)
+ : PipelineHandler(manager)
+{
+}
+
+/*
+ * Generate a StreamConfiguration for YUV/RGB use case.
+ *
+ * Verify if the sensor can produce a YUV/RGB media bus format and collect
+ * all the processed pixel formats the ISI can generate as supported stream
+ * configurations.
+ */
+StreamConfiguration PipelineHandlerISI::generateYUVConfiguration(Camera *camera,
+ const Size &size)
+{
+ ISICameraData *data = cameraData(camera);
+ PixelFormat pixelFormat = formats::YUYV;
+ unsigned int mbusCode;
+
+ mbusCode = data->getYuvMediaBusFormat(pixelFormat);
+ if (!mbusCode)
+ return {};
+
+ /* Adjust the requested size to the sensor's capabilities. */
+ V4L2SubdeviceFormat sensorFmt;
+ sensorFmt.code = mbusCode;
+ sensorFmt.size = size;
+
+ int ret = data->sensor_->tryFormat(&sensorFmt);
+ if (ret) {
+ LOG(ISI, Error) << "Failed to try sensor format.";
+ return {};
+ }
+
+ Size sensorSize = sensorFmt.size;
+
+ /*
+ * Populate the StreamConfiguration.
+ *
+ * As the sensor supports at least one YUV/RGB media bus format, all the
+ * processed ones in formatsMap_ can be generated from it.
+ */
+ std::map<PixelFormat, std::vector<SizeRange>> streamFormats;
+
+ for (const auto &[pixFmt, pipeFmt] : ISICameraConfiguration::formatsMap_) {
+ const PixelFormatInfo &info = PixelFormatInfo::info(pixFmt);
+ if (info.colourEncoding == PixelFormatInfo::ColourEncodingRAW)
+ continue;
+
+ streamFormats[pixFmt] = { { kMinISISize, sensorSize } };
+ }
+
+ StreamFormats formats(streamFormats);
+
+ StreamConfiguration cfg(formats);
+ cfg.pixelFormat = pixelFormat;
+ cfg.size = sensorSize;
+ cfg.bufferCount = 4;
+
+ return cfg;
+}
+
+/*
+ * Generate a StreamConfiguration for the RAW Bayer use case. Verify if the
+ * sensor can produce the requested RAW Bayer format and, if necessary, adjust
+ * it to the one with the largest bit depth the sensor can produce.
+ */
+StreamConfiguration PipelineHandlerISI::generateRawConfiguration(Camera *camera)
+{
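+ /* RAW media bus codes supported by the ISI and their corresponding pixel formats. */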
+ static const std::map<unsigned int, PixelFormat> rawFormats = {
+ { MEDIA_BUS_FMT_SBGGR8_1X8, formats::SBGGR8 },
+ { MEDIA_BUS_FMT_SGBRG8_1X8, formats::SGBRG8 },
+ { MEDIA_BUS_FMT_SGRBG8_1X8, formats::SGRBG8 },
+ { MEDIA_BUS_FMT_SRGGB8_1X8, formats::SRGGB8 },
+ { MEDIA_BUS_FMT_SBGGR10_1X10, formats::SBGGR10 },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, formats::SGBRG10 },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, formats::SGRBG10 },
+ { MEDIA_BUS_FMT_SRGGB10_1X10, formats::SRGGB10 },
+ { MEDIA_BUS_FMT_SBGGR12_1X12, formats::SBGGR12 },
+ { MEDIA_BUS_FMT_SGBRG12_1X12, formats::SGBRG12 },
+ { MEDIA_BUS_FMT_SGRBG12_1X12, formats::SGRBG12 },
+ { MEDIA_BUS_FMT_SRGGB12_1X12, formats::SRGGB12 },
+ { MEDIA_BUS_FMT_SBGGR14_1X14, formats::SBGGR14 },
+ { MEDIA_BUS_FMT_SGBRG14_1X14, formats::SGBRG14 },
+ { MEDIA_BUS_FMT_SGRBG14_1X14, formats::SGRBG14 },
+ { MEDIA_BUS_FMT_SRGGB14_1X14, formats::SRGGB14 },
+ };
+
+ ISICameraData *data = cameraData(camera);
+ PixelFormat pixelFormat = formats::SBGGR10;
+ unsigned int mbusCode;
+
+ /* pixelFormat will be adjusted if the sensor can produce RAW. */
+ mbusCode = data->getRawMediaBusFormat(&pixelFormat);
+ if (!mbusCode)
+ return {};
+
+ /*
+ * Populate the StreamConfiguration with all the supported Bayer
+ * formats the sensor can produce.
+ */
+ std::map<PixelFormat, std::vector<SizeRange>> streamFormats;
+ const CameraSensor *sensor = data->sensor_.get();
+
+ for (unsigned int code : sensor->mbusCodes()) {
+ /* Find a Bayer media bus code from the sensor. */
+ const BayerFormat &bayerFormat = BayerFormat::fromMbusCode(code);
+ if (!bayerFormat.isValid())
+ continue;
+
+ auto it = rawFormats.find(code);
+ if (it == rawFormats.end()) {
+ LOG(ISI, Warning) << bayerFormat
+ << " not supported in ISI formats map.";
+ continue;
+ }
+
+ streamFormats[it->second] = { { sensor->resolution(), sensor->resolution() } };
+ }
+
+ StreamFormats formats(streamFormats);
+
+ StreamConfiguration cfg(formats);
+ cfg.size = sensor->resolution();
+ cfg.pixelFormat = pixelFormat;
+ cfg.bufferCount = 4;
+
+ return cfg;
+}
+
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerISI::generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles)
+{
+ ISICameraData *data = cameraData(camera);
+ std::unique_ptr<ISICameraConfiguration> config =
+ std::make_unique<ISICameraConfiguration>(data);
+
+ if (roles.empty())
+ return config;
+
+ if (roles.size() > data->streams_.size()) {
+ LOG(ISI, Error) << "Only up to " << data->streams_.size()
+ << " streams are supported";
+ return nullptr;
+ }
+
+ for (const auto &role : roles) {
+ /*
+ * Prefer the following formats:
+ * - Still Capture: Full resolution YUYV
+ * - ViewFinder/VideoRecording: 1080p YUYV
+ * - RAW: Full resolution Bayer
+ */
+ StreamConfiguration cfg;
+
+ switch (role) {
+ case StreamRole::StillCapture:
+ case StreamRole::Viewfinder:
+ case StreamRole::VideoRecording: {
+ Size size = role == StreamRole::StillCapture
+ ? data->sensor_->resolution()
+ : PipelineHandlerISI::kPreviewSize;
+ cfg = generateYUVConfiguration(camera, size);
+ if (cfg.pixelFormat.isValid())
+ break;
+
+ /*
+ * Fall back to a Bayer format if that's what the
+ * sensor supports.
+ */
+ [[fallthrough]];
+
+ }
+
+ case StreamRole::Raw: {
+ cfg = generateRawConfiguration(camera);
+ break;
+ }
+
+ default:
+ LOG(ISI, Error) << "Requested stream role not supported: " << role;
+ return nullptr;
+ }
+
+ if (!cfg.pixelFormat.isValid()) {
+ LOG(ISI, Error)
+ << "Cannot generate configuration for role: " << role;
+ return nullptr;
+ }
+
+ config->addConfiguration(cfg);
+ }
+
+ config->validate();
+
+ return config;
+}
+
+int PipelineHandlerISI::configure(Camera *camera, CameraConfiguration *c)
+{
+ ISICameraConfiguration *camConfig = static_cast<ISICameraConfiguration *>(c);
+ ISICameraData *data = cameraData(camera);
+
+ /* All links are immutable except the sensor -> csis link. */
+ const MediaPad *sensorSrc = data->sensor_->entity()->getPadByIndex(0);
+ sensorSrc->links()[0]->setEnabled(true);
+
+ /*
+ * Reset the crossbar switch routing and enable one route for each
+ * requested stream configuration.
+ *
+ * \todo Handle concurrent usage of multiple cameras by adjusting the
+ * routing table instead of resetting it.
+ */
+ V4L2Subdevice::Routing routing = {};
+ unsigned int xbarFirstSource = crossbar_->entity()->pads().size() / 2 + 1;
+
+ for (const auto &[idx, config] : utils::enumerate(*c)) {
+ uint32_t sourcePad = xbarFirstSource + idx;
+ routing.emplace_back(V4L2Subdevice::Stream{ data->xbarSink_, 0 },
+ V4L2Subdevice::Stream{ sourcePad, 0 },
+ V4L2_SUBDEV_ROUTE_FL_ACTIVE);
+ }
+
+ int ret = crossbar_->setRouting(&routing, V4L2Subdevice::ActiveFormat);
+ if (ret)
+ return ret;
+
+ /* Apply format to the sensor and CSIS receiver. */
+ V4L2SubdeviceFormat format = camConfig->sensorFormat_;
+ ret = data->sensor_->setFormat(&format);
+ if (ret)
+ return ret;
+
+ ret = data->csis_->setFormat(0, &format);
+ if (ret)
+ return ret;
+
+ ret = crossbar_->setFormat(data->xbarSink_, &format);
+ if (ret)
+ return ret;
+
+ /* Now configure the ISI and video node instances, one per stream. */
+ data->enabledStreams_.clear();
+ for (const auto &config : *c) {
+ Pipe *pipe = pipeFromStream(camera, config.stream());
+
+ /*
+ * Set the format on the ISI sink pad: it must match what is
+ * received by the CSIS.
+ */
+ ret = pipe->isi->setFormat(0, &format);
+ if (ret)
+ return ret;
+
+ /*
+ * Configure the ISI sink compose rectangle to downscale the
+ * image.
+ *
+ * \todo Additional cropping could be applied on the ISI source
+ * pad to further reduce the output image size.
+ */
+ Rectangle isiScale(config.size);
+ ret = pipe->isi->setSelection(0, V4L2_SEL_TGT_COMPOSE, &isiScale);
+ if (ret)
+ return ret;
+
+ /*
+ * Set the format on ISI source pad: only the media bus code
+ * is relevant as it configures format conversion, while the
+ * size is taken from the sink's COMPOSE (or source's CROP,
+ * if any) rectangles.
+ */
+ unsigned int isiCode = ISICameraConfiguration::formatsMap_.at(config.pixelFormat);
+
+ V4L2SubdeviceFormat isiFormat{};
+ isiFormat.code = isiCode;
+ isiFormat.size = config.size;
+
+ ret = pipe->isi->setFormat(1, &isiFormat);
+ if (ret)
+ return ret;
+
+ V4L2DeviceFormat captureFmt{};
+ captureFmt.fourcc = pipe->capture->toV4L2PixelFormat(config.pixelFormat);
+ captureFmt.size = config.size;
+
+ /* \todo Set stride and format. */
+ ret = pipe->capture->setFormat(&captureFmt);
+ if (ret)
+ return ret;
+
+ /* Store the list of enabled streams for later use. */
+ data->enabledStreams_.push_back(config.stream());
+ }
+
+ return 0;
+}
+
+int PipelineHandlerISI::exportFrameBuffers(Camera *camera, Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ unsigned int count = stream->configuration().bufferCount;
+ Pipe *pipe = pipeFromStream(camera, stream);
+
+ return pipe->capture->exportBuffers(count, buffers);
+}
+
+int PipelineHandlerISI::start(Camera *camera,
+ [[maybe_unused]] const ControlList *controls)
+{
+ ISICameraData *data = cameraData(camera);
+
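+ /* Import buffers and start streaming on the capture device of each enabled stream. */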
+ for (const auto &stream : data->enabledStreams_) {
+ Pipe *pipe = pipeFromStream(camera, stream);
+ const StreamConfiguration &config = stream->configuration();
+
+ int ret = pipe->capture->importBuffers(config.bufferCount);
+ if (ret)
+ return ret;
+
+ ret = pipe->capture->streamOn();
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void PipelineHandlerISI::stopDevice(Camera *camera)
+{
+ ISICameraData *data = cameraData(camera);
+
+ for (const auto &stream : data->enabledStreams_) {
+ Pipe *pipe = pipeFromStream(camera, stream);
+
+ pipe->capture->streamOff();
+ pipe->capture->releaseBuffers();
+ }
+}
+
+int PipelineHandlerISI::queueRequestDevice(Camera *camera, Request *request)
+{
+ for (auto &[stream, buffer] : request->buffers()) {
+ Pipe *pipe = pipeFromStream(camera, stream);
+
+ int ret = pipe->capture->queueBuffer(buffer);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+bool PipelineHandlerISI::match(DeviceEnumerator *enumerator)
+{
+ DeviceMatch dm("mxc-isi");
+ dm.add("crossbar");
+ dm.add("mxc_isi.0");
+ dm.add("mxc_isi.0.capture");
+
+ isiDev_ = acquireMediaDevice(enumerator, dm);
+ if (!isiDev_)
+ return false;
+
+ /*
+ * Acquire the subdevs and video nodes for the crossbar switch and the
+ * processing pipelines.
+ */
+ crossbar_ = V4L2Subdevice::fromEntityName(isiDev_, "crossbar");
+ if (!crossbar_)
+ return false;
+
+ int ret = crossbar_->open();
+ if (ret)
+ return false;
+
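+ /* Enumerate all the ISI pipes, each made of a processing subdevice and a capture video device. */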
+ for (unsigned int i = 0; ; ++i) {
+ std::string entityName = "mxc_isi." + std::to_string(i);
+ std::unique_ptr<V4L2Subdevice> isi =
+ V4L2Subdevice::fromEntityName(isiDev_, entityName);
+ if (!isi)
+ break;
+
+ ret = isi->open();
+ if (ret)
+ return false;
+
+ entityName += ".capture";
+ std::unique_ptr<V4L2VideoDevice> capture =
+ V4L2VideoDevice::fromEntityName(isiDev_, entityName);
+ if (!capture)
+ return false;
+
+ capture->bufferReady.connect(this, &PipelineHandlerISI::bufferReady);
+
+ ret = capture->open();
+ if (ret)
+ return false;
+
+ pipes_.push_back({ std::move(isi), std::move(capture) });
+ }
+
+ if (pipes_.empty()) {
+ LOG(ISI, Error) << "Unable to enumerate pipes";
+ return false;
+ }
+
+ /*
+ * Loop over all the crossbar switch sink pads to find connected CSI-2
+ * receivers and camera sensors.
+ */
+ unsigned int numCameras = 0;
+ unsigned int numSinks = 0;
+ for (MediaPad *pad : crossbar_->entity()->pads()) {
+ unsigned int sink = numSinks;
+
+ if (!(pad->flags() & MEDIA_PAD_FL_SINK) || pad->links().empty())
+ continue;
+
+ /*
+ * Count each crossbar sink pad to correctly configure
+ * routing and format for this camera.
+ */
+ numSinks++;
+
+ MediaEntity *csi = pad->links()[0]->source()->entity();
+ if (csi->pads().size() != 2) {
+ LOG(ISI, Debug) << "Skip unsupported CSI-2 receiver "
+ << csi->name();
+ continue;
+ }
+
+ pad = csi->pads()[0];
+ if (!(pad->flags() & MEDIA_PAD_FL_SINK) || pad->links().empty())
+ continue;
+
+ MediaEntity *sensor = pad->links()[0]->source()->entity();
+ if (sensor->function() != MEDIA_ENT_F_CAM_SENSOR) {
+ LOG(ISI, Debug) << "Skip unsupported subdevice "
+ << sensor->name();
+ continue;
+ }
+
+ /* Create the camera data. */
+ std::unique_ptr<ISICameraData> data =
+ std::make_unique<ISICameraData>(this);
+
+ data->sensor_ = std::make_unique<CameraSensor>(sensor);
+ data->csis_ = std::make_unique<V4L2Subdevice>(csi);
+ data->xbarSink_ = sink;
+
+ ret = data->init();
+ if (ret) {
+ LOG(ISI, Error) << "Failed to initialize camera data";
+ return false;
+ }
+
+ /* Register the camera. */
+ const std::string &id = data->sensor_->id();
+ std::set<Stream *> streams;
+ std::transform(data->streams_.begin(), data->streams_.end(),
+ std::inserter(streams, streams.end()),
+ [](Stream &s) { return &s; });
+
+ std::shared_ptr<Camera> camera =
+ Camera::create(std::move(data), id, streams);
+
+ registerCamera(std::move(camera));
+ numCameras++;
+ }
+
+ return numCameras > 0;
+}
+
+PipelineHandlerISI::Pipe *PipelineHandlerISI::pipeFromStream(Camera *camera,
+ const Stream *stream)
+{
+ ISICameraData *data = cameraData(camera);
+ unsigned int pipeIndex = data->pipeIndex(stream);
+
+ ASSERT(pipeIndex < pipes_.size());
+
+ return &pipes_[pipeIndex];
+}
+
+void PipelineHandlerISI::bufferReady(FrameBuffer *buffer)
+{
+ Request *request = buffer->request();
+
+ /* Record the sensor's timestamp in the request metadata. */
+ ControlList &metadata = request->metadata();
+ if (!metadata.contains(controls::SensorTimestamp.id()))
+ metadata.set(controls::SensorTimestamp,
+ buffer->metadata().timestamp);
+
+ completeBuffer(request, buffer);
+ if (request->hasPendingBuffers())
+ return;
+
+ completeRequest(request);
+}
+
+REGISTER_PIPELINE_HANDLER(PipelineHandlerISI, "imx8-isi")
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/imx8-isi/meson.build b/src/libcamera/pipeline/imx8-isi/meson.build
new file mode 100644
index 00000000..ffd0ce54
--- /dev/null
+++ b/src/libcamera/pipeline/imx8-isi/meson.build
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_sources += files([
+ 'imx8-isi.cpp'
+])
diff --git a/src/libcamera/pipeline/ipu3/cio2.cpp b/src/libcamera/pipeline/ipu3/cio2.cpp
index 08e254f7..81a7a8ab 100644
--- a/src/libcamera/pipeline/ipu3/cio2.cpp
+++ b/src/libcamera/pipeline/ipu3/cio2.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * cio2.cpp - Intel IPU3 CIO2
+ * Intel IPU3 CIO2
*/
#include "cio2.h"
@@ -15,6 +15,7 @@
#include <libcamera/formats.h>
#include <libcamera/geometry.h>
#include <libcamera/stream.h>
+#include <libcamera/transform.h>
#include "libcamera/internal/camera_sensor.h"
#include "libcamera/internal/framebuffer.h"
@@ -177,10 +178,12 @@ int CIO2Device::init(const MediaDevice *media, unsigned int index)
/**
* \brief Configure the CIO2 unit
* \param[in] size The requested CIO2 output frame size
+ * \param[in] transform The transformation to be applied on the image sensor
* \param[out] outputFormat The CIO2 unit output image format
* \return 0 on success or a negative error code otherwise
*/
-int CIO2Device::configure(const Size &size, V4L2DeviceFormat *outputFormat)
+int CIO2Device::configure(const Size &size, const Transform &transform,
+ V4L2DeviceFormat *outputFormat)
{
V4L2SubdeviceFormat sensorFormat;
int ret;
@@ -191,7 +194,7 @@ int CIO2Device::configure(const Size &size, V4L2DeviceFormat *outputFormat)
*/
std::vector<unsigned int> mbusCodes = utils::map_keys(mbusCodesToPixelFormat);
sensorFormat = getSensorFormat(mbusCodes, size);
- ret = sensor_->setFormat(&sensorFormat);
+ ret = sensor_->setFormat(&sensorFormat, transform);
if (ret)
return ret;
@@ -199,11 +202,11 @@ int CIO2Device::configure(const Size &size, V4L2DeviceFormat *outputFormat)
if (ret)
return ret;
- const auto &itInfo = mbusCodesToPixelFormat.find(sensorFormat.mbus_code);
+ const auto &itInfo = mbusCodesToPixelFormat.find(sensorFormat.code);
if (itInfo == mbusCodesToPixelFormat.end())
return -EINVAL;
- outputFormat->fourcc = V4L2PixelFormat::fromPixelFormat(itInfo->second);
+ outputFormat->fourcc = output_->toV4L2PixelFormat(itInfo->second);
outputFormat->size = sensorFormat.size;
outputFormat->planesCount = 1;
@@ -227,13 +230,13 @@ StreamConfiguration CIO2Device::generateConfiguration(Size size) const
/* Query the sensor static information for closest match. */
std::vector<unsigned int> mbusCodes = utils::map_keys(mbusCodesToPixelFormat);
V4L2SubdeviceFormat sensorFormat = getSensorFormat(mbusCodes, size);
- if (!sensorFormat.mbus_code) {
+ if (!sensorFormat.code) {
LOG(IPU3, Error) << "Sensor does not support mbus code";
return {};
}
cfg.size = sensorFormat.size;
- cfg.pixelFormat = mbusCodesToPixelFormat.at(sensorFormat.mbus_code);
+ cfg.pixelFormat = mbusCodesToPixelFormat.at(sensorFormat.code);
cfg.bufferCount = kBufferCount;
return cfg;
@@ -323,7 +326,7 @@ V4L2SubdeviceFormat CIO2Device::getSensorFormat(const std::vector<unsigned int>
}
V4L2SubdeviceFormat format{};
- format.mbus_code = bestCode;
+ format.code = bestCode;
format.size = bestSize;
return format;
diff --git a/src/libcamera/pipeline/ipu3/cio2.h b/src/libcamera/pipeline/ipu3/cio2.h
index 68504a2d..963c2f6b 100644
--- a/src/libcamera/pipeline/ipu3/cio2.h
+++ b/src/libcamera/pipeline/ipu3/cio2.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * cio2.h - Intel IPU3 CIO2
+ * Intel IPU3 CIO2
*/
#pragma once
@@ -26,6 +26,7 @@ class Request;
class Size;
class SizeRange;
struct StreamConfiguration;
+enum class Transform;
class CIO2Device
{
@@ -38,7 +39,8 @@ public:
std::vector<SizeRange> sizes(const PixelFormat &format) const;
int init(const MediaDevice *media, unsigned int index);
- int configure(const Size &size, V4L2DeviceFormat *outputFormat);
+ int configure(const Size &size, const Transform &transform,
+ V4L2DeviceFormat *outputFormat);
StreamConfiguration generateConfiguration(Size size) const;
diff --git a/src/libcamera/pipeline/ipu3/frames.cpp b/src/libcamera/pipeline/ipu3/frames.cpp
index a4c3477c..88eb9d05 100644
--- a/src/libcamera/pipeline/ipu3/frames.cpp
+++ b/src/libcamera/pipeline/ipu3/frames.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * frames.cpp - Intel IPU3 Frames helper
+ * Intel IPU3 Frames helper
*/
#include "frames.h"
diff --git a/src/libcamera/pipeline/ipu3/frames.h b/src/libcamera/pipeline/ipu3/frames.h
index 6e3cb915..a347b66f 100644
--- a/src/libcamera/pipeline/ipu3/frames.h
+++ b/src/libcamera/pipeline/ipu3/frames.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * frames.h - Intel IPU3 Frames helper
+ * Intel IPU3 Frames helper
*/
#pragma once
diff --git a/src/libcamera/pipeline/ipu3/imgu.cpp b/src/libcamera/pipeline/ipu3/imgu.cpp
index 59305f85..7be78091 100644
--- a/src/libcamera/pipeline/ipu3/imgu.cpp
+++ b/src/libcamera/pipeline/ipu3/imgu.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * imgu.cpp - Intel IPU3 ImgU
+ * Intel IPU3 ImgU
*/
#include "imgu.h"
@@ -504,7 +504,7 @@ int ImgUDevice::configure(const PipeConfig &pipeConfig, V4L2DeviceFormat *inputF
LOG(IPU3, Debug) << "ImgU BDS rectangle = " << bds;
V4L2SubdeviceFormat gdcFormat = {};
- gdcFormat.mbus_code = MEDIA_BUS_FMT_FIXED;
+ gdcFormat.code = MEDIA_BUS_FMT_FIXED;
gdcFormat.size = pipeConfig.gdc;
ret = imgu_->setFormat(PAD_INPUT, &gdcFormat);
@@ -543,7 +543,7 @@ int ImgUDevice::configureVideoDevice(V4L2VideoDevice *dev, unsigned int pad,
V4L2DeviceFormat *outputFormat)
{
V4L2SubdeviceFormat imguFormat = {};
- imguFormat.mbus_code = MEDIA_BUS_FMT_FIXED;
+ imguFormat.code = MEDIA_BUS_FMT_FIXED;
imguFormat.size = cfg.size;
int ret = imgu_->setFormat(pad, &imguFormat);
@@ -558,7 +558,7 @@ int ImgUDevice::configureVideoDevice(V4L2VideoDevice *dev, unsigned int pad,
return 0;
*outputFormat = {};
- outputFormat->fourcc = V4L2PixelFormat::fromPixelFormat(formats::NV12);
+ outputFormat->fourcc = dev->toV4L2PixelFormat(formats::NV12);
outputFormat->size = cfg.size;
outputFormat->planesCount = 2;
diff --git a/src/libcamera/pipeline/ipu3/imgu.h b/src/libcamera/pipeline/ipu3/imgu.h
index 0af4dd8a..fa508316 100644
--- a/src/libcamera/pipeline/ipu3/imgu.h
+++ b/src/libcamera/pipeline/ipu3/imgu.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * imgu.h - Intel IPU3 ImgU
+ * Intel IPU3 ImgU
*/
#pragma once
diff --git a/src/libcamera/pipeline/ipu3/ipu3.cpp b/src/libcamera/pipeline/ipu3/ipu3.cpp
index b7dda282..066fd4a2 100644
--- a/src/libcamera/pipeline/ipu3/ipu3.cpp
+++ b/src/libcamera/pipeline/ipu3/ipu3.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipu3.cpp - Pipeline handler for Intel IPU3
+ * Pipeline handler for Intel IPU3
*/
#include <algorithm>
@@ -11,6 +11,8 @@
#include <queue>
#include <vector>
+#include <linux/intel-ipu3.h>
+
#include <libcamera/base/log.h>
#include <libcamera/base/utils.h>
@@ -49,7 +51,7 @@ class IPU3CameraData : public Camera::Private
{
public:
IPU3CameraData(PipelineHandler *pipe)
- : Camera::Private(pipe), supportsFlips_(false)
+ : Camera::Private(pipe)
{
}
@@ -71,8 +73,6 @@ public:
Stream rawStream_;
Rectangle cropRegion_;
- bool supportsFlips_;
- Transform rotationTransform_;
std::unique_ptr<DelayedControls> delayedCtrls_;
IPU3Frames frameInfos_;
@@ -134,8 +134,8 @@ public:
PipelineHandlerIPU3(CameraManager *manager);
- CameraConfiguration *generateConfiguration(Camera *camera,
- const StreamRoles &roles) override;
+ std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles) override;
int configure(Camera *camera, CameraConfiguration *config) override;
int exportFrameBuffers(Camera *camera, Stream *stream,
@@ -182,48 +182,15 @@ CameraConfiguration::Status IPU3CameraConfiguration::validate()
if (config_.empty())
return Invalid;
- Transform combined = transform * data_->rotationTransform_;
-
- /*
- * We combine the platform and user transform, but must "adjust away"
- * any combined result that includes a transposition, as we can't do
- * those. In this case, flipping only the transpose bit is helpful to
- * applications - they either get the transform they requested, or have
- * to do a simple transpose themselves (they don't have to worry about
- * the other possible cases).
- */
- if (!!(combined & Transform::Transpose)) {
- /*
- * Flipping the transpose bit in "transform" flips it in the
- * combined result too (as it's the last thing that happens),
- * which is of course clearing it.
- */
- transform ^= Transform::Transpose;
- combined &= ~Transform::Transpose;
- status = Adjusted;
- }
-
/*
- * We also check if the sensor doesn't do h/vflips at all, in which
- * case we clear them, and the application will have to do everything.
+ * Validate the requested transform against the sensor capabilities and
+ * rotation, and store the final combined transform that configure() will
+ * need to apply to the sensor, to save us working it out again.
*/
- if (!data_->supportsFlips_ && !!combined) {
- /*
- * If the sensor can do no transforms, then combined must be
- * changed to the identity. The only user transform that gives
- * rise to this is the inverse of the rotation. (Recall that
- * combined = transform * rotationTransform.)
- */
- transform = -data_->rotationTransform_;
- combined = Transform::Identity;
+ Orientation requestedOrientation = orientation;
+ combinedTransform_ = data_->cio2_.sensor()->computeTransform(&orientation);
+ if (orientation != requestedOrientation)
status = Adjusted;
- }
-
- /*
- * Store the final combined transform that configure() will need to
- * apply to the sensor to save us working it out again.
- */
- combinedTransform_ = combined;
/* Cap the number of entries to the available streams. */
if (config_.size() > kMaxStreams) {
@@ -243,6 +210,7 @@ CameraConfiguration::Status IPU3CameraConfiguration::validate()
*/
unsigned int rawCount = 0;
unsigned int yuvCount = 0;
+ Size rawRequirement;
Size maxYuvSize;
Size rawSize;
@@ -251,10 +219,11 @@ CameraConfiguration::Status IPU3CameraConfiguration::validate()
if (info.colourEncoding == PixelFormatInfo::ColourEncodingRAW) {
rawCount++;
- rawSize.expandTo(cfg.size);
+ rawSize = std::max(rawSize, cfg.size);
} else {
yuvCount++;
- maxYuvSize.expandTo(cfg.size);
+ maxYuvSize = std::max(maxYuvSize, cfg.size);
+ rawRequirement.expandTo(cfg.size);
}
}
@@ -283,17 +252,17 @@ CameraConfiguration::Status IPU3CameraConfiguration::validate()
* The output YUV streams will be limited in size to the maximum frame
* size requested for the RAW stream, if present.
*
- * If no raw stream is requested generate a size as large as the maximum
- * requested YUV size aligned to the ImgU constraints and bound by the
- * sensor's maximum resolution. See
+ * If no raw stream is requested, generate a size from the largest YUV
+ * stream, aligned to the ImgU constraints and bound
+ * by the sensor's maximum resolution. See
* https://bugs.libcamera.org/show_bug.cgi?id=32
*/
if (rawSize.isNull())
- rawSize = maxYuvSize.expandedTo({ ImgUDevice::kIFMaxCropWidth,
- ImgUDevice::kIFMaxCropHeight })
- .grownBy({ ImgUDevice::kOutputMarginWidth,
- ImgUDevice::kOutputMarginHeight })
- .boundedTo(data_->cio2_.sensor()->resolution());
+ rawSize = rawRequirement.expandedTo({ ImgUDevice::kIFMaxCropWidth,
+ ImgUDevice::kIFMaxCropHeight })
+ .grownBy({ ImgUDevice::kOutputMarginWidth,
+ ImgUDevice::kOutputMarginHeight })
+ .boundedTo(data_->cio2_.sensor()->resolution());
cio2Configuration_ = data_->cio2_.generateConfiguration(rawSize);
if (!cio2Configuration_.pixelFormat.isValid())
@@ -420,11 +389,12 @@ PipelineHandlerIPU3::PipelineHandlerIPU3(CameraManager *manager)
{
}
-CameraConfiguration *PipelineHandlerIPU3::generateConfiguration(Camera *camera,
- const StreamRoles &roles)
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerIPU3::generateConfiguration(Camera *camera, Span<const StreamRole> roles)
{
IPU3CameraData *data = cameraData(camera);
- IPU3CameraConfiguration *config = new IPU3CameraConfiguration(data);
+ std::unique_ptr<IPU3CameraConfiguration> config =
+ std::make_unique<IPU3CameraConfiguration>(data);
if (roles.empty())
return config;
@@ -490,7 +460,6 @@ CameraConfiguration *PipelineHandlerIPU3::generateConfiguration(Camera *camera,
default:
LOG(IPU3, Error)
<< "Requested stream role not supported: " << role;
- delete config;
return nullptr;
}
@@ -552,7 +521,7 @@ int PipelineHandlerIPU3::configure(Camera *camera, CameraConfiguration *c)
return ret;
/*
- * \todo: Enable links selectively based on the requested streams.
+ * \todo Enable links selectively based on the requested streams.
* As of now, enable all links unconditionally.
* \todo Don't configure the ImgU at all if we only have a single
* stream which is for raw capture, in which case no buffers will
@@ -568,7 +537,7 @@ int PipelineHandlerIPU3::configure(Camera *camera, CameraConfiguration *c)
*/
const Size &sensorSize = config->cio2Format().size;
V4L2DeviceFormat cio2Format;
- ret = cio2->configure(sensorSize, &cio2Format);
+ ret = cio2->configure(sensorSize, config->combinedTransform_, &cio2Format);
if (ret)
return ret;
@@ -577,24 +546,6 @@ int PipelineHandlerIPU3::configure(Camera *camera, CameraConfiguration *c)
data->cropRegion_ = sensorInfo.analogCrop;
/*
- * Configure the H/V flip controls based on the combination of
- * the sensor and user transform.
- */
- if (data->supportsFlips_) {
- ControlList sensorCtrls(cio2->sensor()->controls());
- sensorCtrls.set(V4L2_CID_HFLIP,
- static_cast<int32_t>(!!(config->combinedTransform_
- & Transform::HFlip)));
- sensorCtrls.set(V4L2_CID_VFLIP,
- static_cast<int32_t>(!!(config->combinedTransform_
- & Transform::VFlip)));
-
- ret = cio2->sensor()->setControls(&sensorCtrls);
- if (ret)
- return ret;
- }
-
- /*
* If the ImgU gets configured, its driver seems to expect that
* buffers will be queued to its outputs, as otherwise the next
* capture session that uses the ImgU fails when queueing
@@ -1143,25 +1094,12 @@ int PipelineHandlerIPU3::registerCameras()
&IPU3CameraData::frameStart);
/* Convert the sensor rotation to a transformation */
- int32_t rotation = 0;
- if (data->properties_.contains(properties::Rotation))
- rotation = data->properties_.get(properties::Rotation);
- else
+ const auto &rotation = data->properties_.get(properties::Rotation);
+ if (!rotation)
LOG(IPU3, Warning) << "Rotation control not exposed by "
<< cio2->sensor()->id()
<< ". Assume rotation 0";
- bool success;
- data->rotationTransform_ = transformFromRotation(rotation, &success);
- if (!success)
- LOG(IPU3, Warning) << "Invalid rotation of " << rotation
- << " degrees: ignoring";
-
- ControlList ctrls = cio2->sensor()->getControls({ V4L2_CID_HFLIP });
- if (!ctrls.empty())
- /* We assume the sensor supports VFLIP too. */
- data->supportsFlips_ = true;
-
/**
* \todo Dynamically assign ImgU and output devices to each
* stream and camera; as of now, limit support to two cameras
@@ -1244,8 +1182,16 @@ int IPU3CameraData::loadIPA()
if (ret)
return ret;
- ret = ipa_->init(IPASettings{ "", sensor->model() }, sensorInfo,
- sensor->controls(), &ipaControls_);
+ /*
+ * The IPA tuning file is made from the sensor name. If the tuning file
+ * isn't found, fall back to the 'uncalibrated' file.
+ */
+ std::string ipaTuningFile = ipa_->configurationFile(sensor->model() + ".yaml");
+ if (ipaTuningFile.empty())
+ ipaTuningFile = ipa_->configurationFile("uncalibrated.yaml");
+
+ ret = ipa_->init(IPASettings{ ipaTuningFile, sensor->model() },
+ sensorInfo, sensor->controls(), &ipaControls_);
if (ret) {
LOG(IPU3, Error) << "Failed to initialise the IPU3 IPA";
return ret;
@@ -1289,6 +1235,8 @@ void IPU3CameraData::paramsBufferReady(unsigned int id)
imgu_->viewfinder_->queueBuffer(outbuffer);
}
+ info->paramBuffer->_d()->metadata().planes()[0].bytesused =
+ sizeof(struct ipu3_uapi_params);
imgu_->param_->queueBuffer(info->paramBuffer);
imgu_->stat_->queueBuffer(info->statBuffer);
imgu_->input_->queueBuffer(info->rawBuffer);
@@ -1330,8 +1278,9 @@ void IPU3CameraData::imguOutputBufferReady(FrameBuffer *buffer)
request->metadata().set(controls::draft::PipelineDepth, 3);
/* \todo Actually apply the scaler crop region to the ImgU. */
- if (request->controls().contains(controls::ScalerCrop))
- cropRegion_ = request->controls().get(controls::ScalerCrop);
+ const auto &scalerCrop = request->controls().get(controls::ScalerCrop);
+ if (scalerCrop)
+ cropRegion_ = *scalerCrop;
request->metadata().set(controls::ScalerCrop, cropRegion_);
if (frameInfos_.tryComplete(info))
@@ -1424,7 +1373,7 @@ void IPU3CameraData::statBufferReady(FrameBuffer *buffer)
return;
}
- ipa_->processStatsBuffer(info->id, request->metadata().get(controls::SensorTimestamp),
+ ipa_->processStatsBuffer(info->id, request->metadata().get(controls::SensorTimestamp).value_or(0),
info->statBuffer->cookie(), info->effectiveSensorControls);
}
@@ -1455,14 +1404,12 @@ void IPU3CameraData::frameStart(uint32_t sequence)
Request *request = processingRequests_.front();
processingRequests_.pop();
- if (!request->controls().contains(controls::draft::TestPatternMode))
+ const auto &testPatternMode = request->controls().get(controls::draft::TestPatternMode);
+ if (!testPatternMode)
return;
- const int32_t testPatternMode = request->controls().get(
- controls::draft::TestPatternMode);
-
int ret = cio2_.sensor()->setTestPatternMode(
- static_cast<controls::draft::TestPatternModeEnum>(testPatternMode));
+ static_cast<controls::draft::TestPatternModeEnum>(*testPatternMode));
if (ret) {
LOG(IPU3, Error) << "Failed to set test pattern mode: "
<< ret;
@@ -1470,9 +1417,9 @@ void IPU3CameraData::frameStart(uint32_t sequence)
}
request->metadata().set(controls::draft::TestPatternMode,
- testPatternMode);
+ *testPatternMode);
}
-REGISTER_PIPELINE_HANDLER(PipelineHandlerIPU3)
+REGISTER_PIPELINE_HANDLER(PipelineHandlerIPU3, "ipu3")
} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/mali-c55/mali-c55.cpp b/src/libcamera/pipeline/mali-c55/mali-c55.cpp
new file mode 100644
index 00000000..45c71c1d
--- /dev/null
+++ b/src/libcamera/pipeline/mali-c55/mali-c55.cpp
@@ -0,0 +1,1066 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas on Board Oy
+ *
+ * Pipeline Handler for ARM's Mali-C55 ISP
+ */
+
+#include <algorithm>
+#include <array>
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+
+#include <linux/media-bus-format.h>
+#include <linux/media.h>
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/camera.h>
+#include <libcamera/formats.h>
+#include <libcamera/geometry.h>
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/v4l2_subdevice.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+
+namespace {
+
+bool isFormatRaw(const libcamera::PixelFormat &pixFmt)
+{
+ return libcamera::PixelFormatInfo::info(pixFmt).colourEncoding ==
+ libcamera::PixelFormatInfo::ColourEncodingRAW;
+}
+
+} /* namespace */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(MaliC55)
+
+const std::map<libcamera::PixelFormat, unsigned int> maliC55FmtToCode = {
+ /* \todo Support all formats supported by the driver in libcamera. */
+
+ { formats::RGB565, MEDIA_BUS_FMT_RGB121212_1X36 },
+ { formats::RGB888, MEDIA_BUS_FMT_RGB121212_1X36 },
+ { formats::YUYV, MEDIA_BUS_FMT_YUV10_1X30 },
+ { formats::UYVY, MEDIA_BUS_FMT_YUV10_1X30 },
+ { formats::R8, MEDIA_BUS_FMT_YUV10_1X30 },
+ { formats::NV12, MEDIA_BUS_FMT_YUV10_1X30 },
+ { formats::NV21, MEDIA_BUS_FMT_YUV10_1X30 },
+
+ /* RAW formats, FR pipe only. */
+ { formats::SGBRG8, MEDIA_BUS_FMT_SGBRG8_1X8 },
+ { formats::SRGGB8, MEDIA_BUS_FMT_SRGGB8_1X8 },
+ { formats::SBGGR8, MEDIA_BUS_FMT_SBGGR8_1X8 },
+ { formats::SGRBG8, MEDIA_BUS_FMT_SGRBG8_1X8 },
+ { formats::SGBRG10, MEDIA_BUS_FMT_SGBRG10_1X10 },
+ { formats::SRGGB10, MEDIA_BUS_FMT_SRGGB10_1X10 },
+ { formats::SBGGR10, MEDIA_BUS_FMT_SBGGR10_1X10 },
+ { formats::SGRBG10, MEDIA_BUS_FMT_SGRBG10_1X10 },
+ { formats::SGBRG12, MEDIA_BUS_FMT_SGBRG12_1X12 },
+ { formats::SRGGB12, MEDIA_BUS_FMT_SRGGB12_1X12 },
+ { formats::SBGGR12, MEDIA_BUS_FMT_SBGGR12_1X12 },
+ { formats::SGRBG12, MEDIA_BUS_FMT_SGRBG12_1X12 },
+ { formats::SGBRG14, MEDIA_BUS_FMT_SGBRG14_1X14 },
+ { formats::SRGGB14, MEDIA_BUS_FMT_SRGGB14_1X14 },
+ { formats::SBGGR14, MEDIA_BUS_FMT_SBGGR14_1X14 },
+ { formats::SGRBG14, MEDIA_BUS_FMT_SGRBG14_1X14 },
+ { formats::SGBRG16, MEDIA_BUS_FMT_SGBRG16_1X16 },
+ { formats::SRGGB16, MEDIA_BUS_FMT_SRGGB16_1X16 },
+ { formats::SBGGR16, MEDIA_BUS_FMT_SBGGR16_1X16 },
+ { formats::SGRBG16, MEDIA_BUS_FMT_SGRBG16_1X16 },
+};
+
+constexpr Size kMaliC55MinSize = { 128, 128 };
+constexpr Size kMaliC55MaxSize = { 8192, 8192 };
+constexpr unsigned int kMaliC55ISPInternalFormat = MEDIA_BUS_FMT_RGB121212_1X36;
+
+class MaliC55CameraData : public Camera::Private
+{
+public:
+ MaliC55CameraData(PipelineHandler *pipe, MediaEntity *entity)
+ : Camera::Private(pipe), entity_(entity)
+ {
+ }
+
+ int init();
+
+ /* Delegate these functions to either the TPG or the CameraSensor. */
+ const std::vector<unsigned int> mbusCodes() const;
+ const std::vector<Size> sizes(unsigned int mbusCode) const;
+ const Size resolution() const;
+
+ PixelFormat bestRawFormat() const;
+
+ PixelFormat adjustRawFormat(const PixelFormat &pixFmt) const;
+ Size adjustRawSizes(const PixelFormat &pixFmt, const Size &rawSize) const;
+
+ std::unique_ptr<CameraSensor> sensor_;
+
+ MediaEntity *entity_;
+ std::unique_ptr<V4L2Subdevice> csi_;
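+ /* Subdevice of entity_, which is either the TPG or the camera sensor. */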
+ std::unique_ptr<V4L2Subdevice> sd_;
+ Stream frStream_;
+ Stream dsStream_;
+
+private:
+ void initTPGData();
+
+ std::string id_;
+ std::vector<unsigned int> tpgCodes_;
+ std::vector<Size> tpgSizes_;
+ Size tpgResolution_;
+};
+
+int MaliC55CameraData::init()
+{
+ int ret;
+
+ sd_ = std::make_unique<V4L2Subdevice>(entity_);
+ ret = sd_->open();
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to open sensor subdevice";
+ return ret;
+ }
+
+ /* If this camera is created from TPG, we return here. */
+ if (entity_->name() == "mali-c55 tpg") {
+ initTPGData();
+ return 0;
+ }
+
+ /*
+ * Register a CameraSensor if we connect to a sensor and create
+ * an entity for the connected CSI-2 receiver.
+ */
+ sensor_ = std::make_unique<CameraSensor>(entity_);
+ ret = sensor_->init();
+ if (ret)
+ return ret;
+
+ const MediaPad *sourcePad = entity_->getPadByIndex(0);
+ MediaEntity *csiEntity = sourcePad->links()[0]->sink()->entity();
+
+ csi_ = std::make_unique<V4L2Subdevice>(csiEntity);
+ ret = csi_->open();
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to open CSI-2 subdevice";
+ return ret;
+ }
+
+ return 0;
+}
+
+void MaliC55CameraData::initTPGData()
+{
+ /* Replicate the CameraSensor implementation for TPG. */
+ V4L2Subdevice::Formats formats = sd_->formats(0);
+ if (formats.empty())
+ return;
+
+ tpgCodes_ = utils::map_keys(formats);
+ std::sort(tpgCodes_.begin(), tpgCodes_.end());
+
+ for (const auto &format : formats) {
+ const std::vector<SizeRange> &ranges = format.second;
+ std::transform(ranges.begin(), ranges.end(), std::back_inserter(tpgSizes_),
+ [](const SizeRange &range) { return range.max; });
+ }
+
+ tpgResolution_ = tpgSizes_.back();
+}
+
+const std::vector<unsigned int> MaliC55CameraData::mbusCodes() const
+{
+ if (sensor_)
+ return sensor_->mbusCodes();
+
+ return tpgCodes_;
+}
+
+const std::vector<Size> MaliC55CameraData::sizes(unsigned int mbusCode) const
+{
+ if (sensor_)
+ return sensor_->sizes(mbusCode);
+
+ V4L2Subdevice::Formats formats = sd_->formats(0);
+ if (formats.empty())
+ return {};
+
+ std::vector<Size> sizes;
+ const auto &format = formats.find(mbusCode);
+ if (format == formats.end())
+ return {};
+
+ const std::vector<SizeRange> &ranges = format->second;
+ std::transform(ranges.begin(), ranges.end(), std::back_inserter(sizes),
+ [](const SizeRange &range) { return range.max; });
+
+ std::sort(sizes.begin(), sizes.end());
+
+ return sizes;
+}
+
+const Size MaliC55CameraData::resolution() const
+{
+ if (sensor_)
+ return sensor_->resolution();
+
+ return tpgResolution_;
+}
+
+PixelFormat MaliC55CameraData::bestRawFormat() const
+{
+ unsigned int bitDepth = 0;
+ PixelFormat rawFormat;
+
+ /*
+ * Iterate over all the supported PixelFormats and find the one
+ * supported by the camera with the largest bit depth.
+ */
+ for (const auto &maliFormat : maliC55FmtToCode) {
+ PixelFormat pixFmt = maliFormat.first;
+ if (!isFormatRaw(pixFmt))
+ continue;
+
+ unsigned int rawCode = maliFormat.second;
+ const auto rawSizes = sizes(rawCode);
+ if (rawSizes.empty())
+ continue;
+
+ BayerFormat bayer = BayerFormat::fromMbusCode(rawCode);
+ if (bayer.bitDepth > bitDepth) {
+ bitDepth = bayer.bitDepth;
+ rawFormat = pixFmt;
+ }
+ }
+
+ return rawFormat;
+}
+
+/*
+ * Make sure the provided raw pixel format is supported and adjust it to
+ * one of the supported ones if it's not.
+ */
+PixelFormat MaliC55CameraData::adjustRawFormat(const PixelFormat &rawFmt) const
+{
+ /* Make sure the provided raw format is supported by the pipeline. */
+ auto it = maliC55FmtToCode.find(rawFmt);
+ if (it == maliC55FmtToCode.end())
+ return bestRawFormat();
+
+ /* Now make sure the RAW mbus code is supported by the image source. */
+ unsigned int rawCode = it->second;
+ const auto rawSizes = sizes(rawCode);
+ if (rawSizes.empty())
+ return bestRawFormat();
+
+ return rawFmt;
+}
+
+Size MaliC55CameraData::adjustRawSizes(const PixelFormat &rawFmt, const Size &rawSize) const
+{
+ /* Just make sure the format is supported. */
+ auto it = maliC55FmtToCode.find(rawFmt);
+ if (it == maliC55FmtToCode.end())
+ return {};
+
+ /* Check if the size is natively supported. */
+ unsigned int rawCode = it->second;
+ const auto rawSizes = sizes(rawCode);
+ auto sizeIt = std::find(rawSizes.begin(), rawSizes.end(), rawSize);
+ if (sizeIt != rawSizes.end())
+ return rawSize;
+
+ /* Or adjust it to the closest supported size. */
+ uint16_t distance = std::numeric_limits<uint16_t>::max();
+ Size bestSize;
+ for (const Size &size : rawSizes) {
+ uint16_t dist = std::abs(static_cast<int>(rawSize.width) -
+ static_cast<int>(size.width)) +
+ std::abs(static_cast<int>(rawSize.height) -
+ static_cast<int>(size.height));
+ if (dist < distance) {
+ distance = dist;
+ bestSize = size;
+ }
+ }
+
+ return bestSize;
+}
+
+class MaliC55CameraConfiguration : public CameraConfiguration
+{
+public:
+ MaliC55CameraConfiguration(MaliC55CameraData *data)
+ : CameraConfiguration(), data_(data)
+ {
+ }
+
+ Status validate() override;
+
+ V4L2SubdeviceFormat sensorFormat_;
+
+private:
+ static constexpr unsigned int kMaxStreams = 2;
+
+ const MaliC55CameraData *data_;
+};
+
+CameraConfiguration::Status MaliC55CameraConfiguration::validate()
+{
+ Status status = Valid;
+
+ if (config_.empty())
+ return Invalid;
+
+ /* Only 2 streams available. */
+ if (config_.size() > kMaxStreams) {
+ config_.resize(kMaxStreams);
+ status = Adjusted;
+ }
+
+ bool frPipeAvailable = true;
+ StreamConfiguration *rawConfig = nullptr;
+ for (StreamConfiguration &config : config_) {
+ if (!isFormatRaw(config.pixelFormat))
+ continue;
+
+ if (rawConfig) {
+ LOG(MaliC55, Error)
+ << "Only a single RAW stream is supported";
+ return Invalid;
+ }
+
+ rawConfig = &config;
+ }
+
+ Size maxSize = kMaliC55MaxSize;
+ if (rawConfig) {
+ /*
+ * \todo Take into account the Bayer components ordering once
+ * we support rotations.
+ */
+ PixelFormat rawFormat =
+ data_->adjustRawFormat(rawConfig->pixelFormat);
+ if (rawFormat != rawConfig->pixelFormat) {
+ LOG(MaliC55, Debug)
+ << "RAW format adjusted to " << rawFormat;
+ rawConfig->pixelFormat = rawFormat;
+ status = Adjusted;
+ }
+
+ Size rawSize =
+ data_->adjustRawSizes(rawFormat, rawConfig->size);
+ if (rawSize != rawConfig->size) {
+ LOG(MaliC55, Debug)
+ << "RAW sizes adjusted to " << rawSize;
+ rawConfig->size = rawSize;
+ status = Adjusted;
+ }
+
+ maxSize = rawSize;
+
+ rawConfig->setStream(const_cast<Stream *>(&data_->frStream_));
+ frPipeAvailable = false;
+ }
+
+ /* Adjust processed streams. */
+ Size maxYuvSize;
+ for (StreamConfiguration &config : config_) {
+ if (isFormatRaw(config.pixelFormat))
+ continue;
+
+ /* Adjust format and size for processed streams. */
+ const auto it = maliC55FmtToCode.find(config.pixelFormat);
+ if (it == maliC55FmtToCode.end()) {
+ LOG(MaliC55, Debug)
+ << "Format adjusted to " << formats::RGB565;
+ config.pixelFormat = formats::RGB565;
+ status = Adjusted;
+ }
+
+ Size size = std::clamp(config.size, kMaliC55MinSize, maxSize);
+ if (size != config.size) {
+ LOG(MaliC55, Debug)
+ << "Size adjusted to " << size;
+ config.size = size;
+ status = Adjusted;
+ }
+
+ if (maxYuvSize < size)
+ maxYuvSize = size;
+
+ if (frPipeAvailable) {
+ config.setStream(const_cast<Stream *>(&data_->frStream_));
+ frPipeAvailable = false;
+ } else {
+ config.setStream(const_cast<Stream *>(&data_->dsStream_));
+ }
+ }
+
+ /* Compute the sensor format. */
+
+ /* If there's a RAW config, sensor configuration follows it. */
+ if (rawConfig) {
+ const auto it = maliC55FmtToCode.find(rawConfig->pixelFormat);
+ sensorFormat_.code = it->second;
+ sensorFormat_.size = rawConfig->size;
+
+ return status;
+ }
+
+ /* If there's no RAW config, compute the sensor configuration here. */
+ PixelFormat rawFormat = data_->bestRawFormat();
+ const auto it = maliC55FmtToCode.find(rawFormat);
+ sensorFormat_.code = it->second;
+
+ uint16_t distance = std::numeric_limits<uint16_t>::max();
+ const auto sizes = data_->sizes(it->second);
+ Size bestSize;
+ for (const auto &size : sizes) {
+ /* Skip sensor sizes that are smaller than the max YUV size. */
+ if (maxYuvSize.width > size.width ||
+ maxYuvSize.height > size.height)
+ continue;
+
+ uint16_t dist = std::abs(static_cast<int>(maxYuvSize.width) -
+ static_cast<int>(size.width)) +
+ std::abs(static_cast<int>(maxYuvSize.height) -
+ static_cast<int>(size.height));
+ if (dist < distance) {
+ distance = dist;
+ bestSize = size;
+ }
+ }
+ sensorFormat_.size = bestSize;
+
+ LOG(MaliC55, Debug) << "Computed sensor configuration " << sensorFormat_;
+
+ return status;
+}
+
+class PipelineHandlerMaliC55 : public PipelineHandler
+{
+public:
+ PipelineHandlerMaliC55(CameraManager *manager);
+
+ std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles) override;
+ int configure(Camera *camera, CameraConfiguration *config) override;
+
+ int exportFrameBuffers(Camera *camera, Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
+
+ int start(Camera *camera, const ControlList *controls) override;
+ void stopDevice(Camera *camera) override;
+
+ int queueRequestDevice(Camera *camera, Request *request) override;
+
+ void bufferReady(FrameBuffer *buffer);
+
+ bool match(DeviceEnumerator *enumerator) override;
+
+private:
+ struct MaliC55Pipe {
+ std::unique_ptr<V4L2Subdevice> resizer;
+ std::unique_ptr<V4L2VideoDevice> cap;
+ Stream *stream;
+ };
+
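+ /* Indices of the full resolution (FR) and downscale (DS) pipes in pipes_. */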
+ enum {
+ MaliC55FR,
+ MaliC55DS,
+ MaliC55NumPipes,
+ };
+
+ MaliC55CameraData *cameraData(Camera *camera)
+ {
+ return static_cast<MaliC55CameraData *>(camera->_d());
+ }
+
+ MaliC55Pipe *pipeFromStream(MaliC55CameraData *data, Stream *stream)
+ {
+ if (stream == &data->frStream_)
+ return &pipes_[MaliC55FR];
+ else if (stream == &data->dsStream_)
+ return &pipes_[MaliC55DS];
+ else
+ LOG(MaliC55, Fatal) << "Stream " << stream << " not valid";
+ return nullptr;
+ }
+
+ MaliC55Pipe *pipeFromStream(MaliC55CameraData *data, const Stream *stream)
+ {
+ return pipeFromStream(data, const_cast<Stream *>(stream));
+ }
+
+ void resetPipes()
+ {
+ for (MaliC55Pipe &pipe : pipes_)
+ pipe.stream = nullptr;
+ }
+
+ int configureRawStream(MaliC55CameraData *data,
+ const StreamConfiguration &config,
+ V4L2SubdeviceFormat &subdevFormat);
+ int configureProcessedStream(MaliC55CameraData *data,
+ const StreamConfiguration &config,
+ V4L2SubdeviceFormat &subdevFormat);
+
+ void registerMaliCamera(std::unique_ptr<MaliC55CameraData> data,
+ const std::string &name);
+ bool registerTPGCamera(MediaLink *link);
+ bool registerSensorCamera(MediaLink *link);
+
+ MediaDevice *media_;
+ std::unique_ptr<V4L2Subdevice> isp_;
+
+ std::array<MaliC55Pipe, MaliC55NumPipes> pipes_;
+
+ bool dsFitted_;
+};
+
+PipelineHandlerMaliC55::PipelineHandlerMaliC55(CameraManager *manager)
+ : PipelineHandler(manager), dsFitted_(true)
+{
+}
+
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerMaliC55::generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles)
+{
+ MaliC55CameraData *data = cameraData(camera);
+ std::unique_ptr<CameraConfiguration> config =
+ std::make_unique<MaliC55CameraConfiguration>(data);
+ bool frPipeAvailable = true;
+
+ if (roles.empty())
+ return config;
+
+ /* Check if one stream is RAW to reserve the FR pipe for it. */
+ if (std::find(roles.begin(), roles.end(), StreamRole::Raw) != roles.end())
+ frPipeAvailable = false;
+
+ for (const StreamRole &role : roles) {
+ struct MaliC55Pipe *pipe;
+
+ /* Assign pipe for this role. */
+ if (role == StreamRole::Raw) {
+ pipe = &pipes_[MaliC55FR];
+ } else {
+ if (frPipeAvailable) {
+ pipe = &pipes_[MaliC55FR];
+ frPipeAvailable = false;
+ } else {
+ pipe = &pipes_[MaliC55DS];
+ }
+ }
+
+ Size size = std::min(Size{ 1920, 1080 }, data->resolution());
+ PixelFormat pixelFormat;
+
+ switch (role) {
+ case StreamRole::StillCapture:
+ size = data->resolution();
+ [[fallthrough]];
+ case StreamRole::VideoRecording:
+ pixelFormat = formats::NV12;
+ break;
+
+ case StreamRole::Viewfinder:
+ pixelFormat = formats::RGB565;
+ break;
+
+ case StreamRole::Raw:
+ pixelFormat = data->bestRawFormat();
+ if (!pixelFormat.isValid()) {
+ LOG(MaliC55, Error)
+ << "Camera does not support RAW formats";
+ return nullptr;
+ }
+
+ size = data->resolution();
+ break;
+
+ default:
+ LOG(MaliC55, Error)
+ << "Requested stream role not supported: " << role;
+ return nullptr;
+ }
+
+ std::map<PixelFormat, std::vector<SizeRange>> formats;
+ for (const auto &maliFormat : maliC55FmtToCode) {
+ PixelFormat pixFmt = maliFormat.first;
+ bool isRaw = isFormatRaw(pixFmt);
+
+ /* RAW formats are only supported on the FR pipe. */
+ if (pipe != &pipes_[MaliC55FR] && isRaw)
+ continue;
+
+ if (isRaw) {
+ /* Make sure the mbus code is supported. */
+ unsigned int rawCode = maliFormat.second;
+ const auto sizes = data->sizes(rawCode);
+ if (sizes.empty())
+ continue;
+
+ /* And list all sizes the sensor can produce. */
+ std::vector<SizeRange> sizeRanges;
+ std::transform(sizes.begin(), sizes.end(),
+ std::back_inserter(sizeRanges),
+ [](const Size &s) {
+ return SizeRange(s);
+ });
+
+ formats[pixFmt] = sizeRanges;
+ } else {
+ /* Processed formats are always available. */
+ Size maxSize = std::min(kMaliC55MaxSize,
+ data->resolution());
+ formats[pixFmt] = { kMaliC55MinSize, maxSize };
+ }
+ }
+
+ StreamFormats streamFormats(formats);
+ StreamConfiguration cfg(streamFormats);
+ cfg.pixelFormat = pixelFormat;
+ cfg.bufferCount = 4;
+ cfg.size = size;
+
+ config->addConfiguration(cfg);
+ }
+
+ if (config->validate() == CameraConfiguration::Invalid)
+ return nullptr;
+
+ return config;
+}
+
+int PipelineHandlerMaliC55::configureRawStream(MaliC55CameraData *data,
+ const StreamConfiguration &config,
+ V4L2SubdeviceFormat &subdevFormat)
+{
+ Stream *stream = config.stream();
+ MaliC55Pipe *pipe = pipeFromStream(data, stream);
+
+ if (pipe != &pipes_[MaliC55FR]) {
+ LOG(MaliC55, Fatal) << "Only the FR pipe supports RAW capture.";
+ return -EINVAL;
+ }
+
+ /* Enable the debayer route to set fixed internal format on pad #0. */
+ V4L2Subdevice::Routing routing = {};
+ routing.emplace_back(V4L2Subdevice::Stream{ 0, 0 },
+ V4L2Subdevice::Stream{ 1, 0 },
+ V4L2_SUBDEV_ROUTE_FL_ACTIVE);
+
+ int ret = pipe->resizer->setRouting(&routing, V4L2Subdevice::ActiveFormat);
+ if (ret)
+ return ret;
+
+ unsigned int rawCode = subdevFormat.code;
+ subdevFormat.code = kMaliC55ISPInternalFormat;
+ ret = pipe->resizer->setFormat(0, &subdevFormat);
+ if (ret)
+ return ret;
+
+ /* Enable the bypass route and apply RAW formats there. */
+ routing.clear();
+ routing.emplace_back(V4L2Subdevice::Stream{ 2, 0 },
+ V4L2Subdevice::Stream{ 1, 0 },
+ V4L2_SUBDEV_ROUTE_FL_ACTIVE);
+ ret = pipe->resizer->setRouting(&routing, V4L2Subdevice::ActiveFormat);
+ if (ret)
+ return ret;
+
+ subdevFormat.code = rawCode;
+ ret = pipe->resizer->setFormat(2, &subdevFormat);
+ if (ret)
+ return ret;
+
+ ret = pipe->resizer->setFormat(1, &subdevFormat);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int PipelineHandlerMaliC55::configureProcessedStream(MaliC55CameraData *data,
+ const StreamConfiguration &config,
+ V4L2SubdeviceFormat &subdevFormat)
+{
+ Stream *stream = config.stream();
+ MaliC55Pipe *pipe = pipeFromStream(data, stream);
+
+ /* Enable the debayer route on the resizer pipe. */
+ V4L2Subdevice::Routing routing = {};
+ routing.emplace_back(V4L2Subdevice::Stream{ 0, 0 },
+ V4L2Subdevice::Stream{ 1, 0 },
+ V4L2_SUBDEV_ROUTE_FL_ACTIVE);
+
+ int ret = pipe->resizer->setRouting(&routing, V4L2Subdevice::ActiveFormat);
+ if (ret)
+ return ret;
+
+ subdevFormat.code = kMaliC55ISPInternalFormat;
+ ret = pipe->resizer->setFormat(0, &subdevFormat);
+ if (ret)
+ return ret;
+
+ /* \todo Configure the resizer crop/compose rectangles. */
+ Rectangle ispCrop = { 0, 0, config.size };
+ ret = pipe->resizer->setSelection(0, V4L2_SEL_TGT_CROP, &ispCrop);
+ if (ret)
+ return ret;
+
+ ret = pipe->resizer->setSelection(0, V4L2_SEL_TGT_COMPOSE, &ispCrop);
+ if (ret)
+ return ret;
+
+ subdevFormat.code = maliC55FmtToCode.find(config.pixelFormat)->second;
+ return pipe->resizer->setFormat(1, &subdevFormat);
+}
+
+int PipelineHandlerMaliC55::configure(Camera *camera,
+ CameraConfiguration *config)
+{
+ resetPipes();
+
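+ /* Start from a known state: disable all links before enabling the ones in use. */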
+ int ret = media_->disableLinks();
+ if (ret)
+ return ret;
+
+ /* Link the graph depending if we are operating the TPG or a sensor. */
+ MaliC55CameraData *data = cameraData(camera);
+ if (data->csi_) {
+ const MediaEntity *csiEntity = data->csi_->entity();
+ ret = csiEntity->getPadByIndex(1)->links()[0]->setEnabled(true);
+ } else {
+ ret = data->entity_->getPadByIndex(0)->links()[0]->setEnabled(true);
+ }
+ if (ret)
+ return ret;
+
+ MaliC55CameraConfiguration *maliConfig =
+ static_cast<MaliC55CameraConfiguration *>(config);
+ V4L2SubdeviceFormat subdevFormat = maliConfig->sensorFormat_;
+ ret = data->sd_->getFormat(0, &subdevFormat);
+ if (ret)
+ return ret;
+
+ if (data->csi_) {
+ ret = data->csi_->setFormat(0, &subdevFormat);
+ if (ret)
+ return ret;
+
+ ret = data->csi_->setFormat(1, &subdevFormat);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Propagate the format to the ISP sink pad and configure the input
+ * crop rectangle (no crop at the moment).
+ *
+ * \todo Configure the CSI-2 receiver.
+ */
+ ret = isp_->setFormat(0, &subdevFormat);
+ if (ret)
+ return ret;
+
+ Rectangle ispCrop(0, 0, subdevFormat.size);
+ ret = isp_->setSelection(0, V4L2_SEL_TGT_CROP, &ispCrop);
+ if (ret)
+ return ret;
+
+ /*
+ * Configure the resizer: set the fixed internal format on the sink
+ * pad, and use the media bus code associated with the desired capture
+ * format on the source pad.
+ *
+ * Configure the crop and compose rectangles to match the desired
+ * stream output size.
+ *
+ * \todo Make the crop/scaler configurable
+ */
+ for (const StreamConfiguration &streamConfig : *config) {
+ Stream *stream = streamConfig.stream();
+ MaliC55Pipe *pipe = pipeFromStream(data, stream);
+
+ if (isFormatRaw(streamConfig.pixelFormat))
+ ret = configureRawStream(data, streamConfig, subdevFormat);
+ else
+ ret = configureProcessedStream(data, streamConfig, subdevFormat);
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to configure pipeline";
+ return ret;
+ }
+
+ /* Now apply the pixel format and size to the capture device. */
+ V4L2DeviceFormat captureFormat;
+ captureFormat.fourcc = pipe->cap->toV4L2PixelFormat(streamConfig.pixelFormat);
+ captureFormat.size = streamConfig.size;
+
+ ret = pipe->cap->setFormat(&captureFormat);
+ if (ret)
+ return ret;
+
+ pipe->stream = stream;
+ }
+
+ return 0;
+}
+
+int PipelineHandlerMaliC55::exportFrameBuffers(Camera *camera, Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ MaliC55Pipe *pipe = pipeFromStream(cameraData(camera), stream);
+ unsigned int count = stream->configuration().bufferCount;
+
+ return pipe->cap->exportBuffers(count, buffers);
+}
+
+int PipelineHandlerMaliC55::start([[maybe_unused]] Camera *camera, [[maybe_unused]] const ControlList *controls)
+{
+ for (MaliC55Pipe &pipe : pipes_) {
+ if (!pipe.stream)
+ continue;
+
+ Stream *stream = pipe.stream;
+
+ int ret = pipe.cap->importBuffers(stream->configuration().bufferCount);
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to import buffers";
+ return ret;
+ }
+
+ ret = pipe.cap->streamOn();
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to start stream";
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+void PipelineHandlerMaliC55::stopDevice([[maybe_unused]] Camera *camera)
+{
+ for (MaliC55Pipe &pipe : pipes_) {
+ if (!pipe.stream)
+ continue;
+
+ pipe.cap->streamOff();
+ pipe.cap->releaseBuffers();
+ }
+}
+
+int PipelineHandlerMaliC55::queueRequestDevice(Camera *camera, Request *request)
+{
+ int ret;
+
+ for (auto &[stream, buffer] : request->buffers()) {
+ MaliC55Pipe *pipe = pipeFromStream(cameraData(camera), stream);
+
+ ret = pipe->cap->queueBuffer(buffer);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void PipelineHandlerMaliC55::bufferReady(FrameBuffer *buffer)
+{
+ Request *request = buffer->request();
+
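+ /* Complete the request only once all of its buffers have completed. */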
+ completeBuffer(request, buffer);
+
+ if (request->hasPendingBuffers())
+ return;
+
+ completeRequest(request);
+}
+
+void PipelineHandlerMaliC55::registerMaliCamera(std::unique_ptr<MaliC55CameraData> data,
+ const std::string &name)
+{
+ std::set<Stream *> streams{ &data->frStream_ };
+ if (dsFitted_)
+ streams.insert(&data->dsStream_);
+
+ std::shared_ptr<Camera> camera = Camera::create(std::move(data),
+ name, streams);
+ registerCamera(std::move(camera));
+}
+
+/*
+ * The only camera we support through direct connection to the ISP is the
+ * Mali-C55 TPG. Check we have that and warn if not.
+ */
+bool PipelineHandlerMaliC55::registerTPGCamera(MediaLink *link)
+{
+ const std::string &name = link->source()->entity()->name();
+ if (name != "mali-c55 tpg") {
+ LOG(MaliC55, Warning) << "Unsupported direct connection to "
+ << link->source()->entity()->name();
+ /*
+ * Return true and just skip registering a camera for this
+ * entity.
+ */
+ return true;
+ }
+
+ std::unique_ptr<MaliC55CameraData> data =
+ std::make_unique<MaliC55CameraData>(this, link->source()->entity());
+
+ if (data->init())
+ return false;
+
+ registerMaliCamera(std::move(data), name);
+
+ return true;
+}
+
+/*
+ * Register a Camera for each sensor connected to the ISP through a CSI-2
+ * receiver.
+ *
+ * \todo Support more complex topologies, such as video muxes.
+ */
+bool PipelineHandlerMaliC55::registerSensorCamera(MediaLink *ispLink)
+{
+ MediaEntity *csi2 = ispLink->source()->entity();
+ const MediaPad *csi2Sink = csi2->getPadByIndex(0);
+
+ for (MediaLink *link : csi2Sink->links()) {
+ MediaEntity *sensor = link->source()->entity();
+ unsigned int function = sensor->function();
+
+ if (function != MEDIA_ENT_F_CAM_SENSOR)
+ continue;
+
+ std::unique_ptr<MaliC55CameraData> data =
+ std::make_unique<MaliC55CameraData>(this, sensor);
+ if (data->init())
+ return false;
+
+		/* \todo Init properties and controls. */
+
+ registerMaliCamera(std::move(data), sensor->name());
+ }
+
+ return true;
+}
+
+bool PipelineHandlerMaliC55::match(DeviceEnumerator *enumerator)
+{
+ const MediaPad *ispSink;
+
+ /*
+ * We search for just the ISP subdevice and the full resolution pipe.
+ * The TPG and the downscale pipe are both optional blocks and may not
+ * be fitted.
+ */
+ DeviceMatch dm("mali-c55");
+ dm.add("mali-c55 isp");
+ dm.add("mali-c55 resizer fr");
+ dm.add("mali-c55 fr");
+
+ media_ = acquireMediaDevice(enumerator, dm);
+ if (!media_)
+ return false;
+
+ isp_ = V4L2Subdevice::fromEntityName(media_, "mali-c55 isp");
+ if (isp_->open() < 0)
+ return false;
+
+ MaliC55Pipe *frPipe = &pipes_[MaliC55FR];
+ frPipe->resizer = V4L2Subdevice::fromEntityName(media_, "mali-c55 resizer fr");
+ if (frPipe->resizer->open() < 0)
+ return false;
+
+ frPipe->cap = V4L2VideoDevice::fromEntityName(media_, "mali-c55 fr");
+ if (frPipe->cap->open() < 0)
+ return false;
+
+ frPipe->cap->bufferReady.connect(this, &PipelineHandlerMaliC55::bufferReady);
+
+ dsFitted_ = !!media_->getEntityByName("mali-c55 ds");
+ if (dsFitted_) {
+ LOG(MaliC55, Debug) << "Downscaler pipe is fitted";
+
+ MaliC55Pipe *dsPipe = &pipes_[MaliC55DS];
+
+ dsPipe->resizer = V4L2Subdevice::fromEntityName(media_, "mali-c55 resizer ds");
+ if (dsPipe->resizer->open() < 0)
+ return false;
+
+ dsPipe->cap = V4L2VideoDevice::fromEntityName(media_, "mali-c55 ds");
+ if (dsPipe->cap->open() < 0)
+ return false;
+
+ dsPipe->cap->bufferReady.connect(this, &PipelineHandlerMaliC55::bufferReady);
+ }
+
+ ispSink = isp_->entity()->getPadByIndex(0);
+ if (!ispSink || ispSink->links().empty()) {
+ LOG(MaliC55, Error) << "ISP sink pad error";
+ return false;
+ }
+
+ /*
+ * We could have several links pointing to the ISP's sink pad, which
+ * will be from entities with one of the following functions:
+ *
+ * MEDIA_ENT_F_CAM_SENSOR - The test pattern generator
+ * MEDIA_ENT_F_VID_IF_BRIDGE - A CSI-2 receiver
+ * MEDIA_ENT_F_IO_V4L - An input device
+ *
+	 * The last one is unsupported for now. The TPG is relatively easy: we
+	 * just register a Camera for it. If we have a CSI-2 receiver we need
+	 * to check its sink pad and register Cameras for anything connected to
+	 * it (there are probably more complex situations in which that is not
+	 * sufficient, but ignore them until we come across them).
+ */
+ bool registered;
+ for (MediaLink *link : ispSink->links()) {
+ unsigned int function = link->source()->entity()->function();
+
+ switch (function) {
+ case MEDIA_ENT_F_CAM_SENSOR:
+ registered = registerTPGCamera(link);
+ if (!registered)
+ return registered;
+
+ break;
+ case MEDIA_ENT_F_VID_IF_BRIDGE:
+ registered = registerSensorCamera(link);
+ if (!registered)
+ return registered;
+
+ break;
+ case MEDIA_ENT_F_IO_V4L:
+ LOG(MaliC55, Warning) << "Memory input not yet supported";
+ break;
+ default:
+ LOG(MaliC55, Error) << "Unsupported entity function";
+ return false;
+ }
+ }
+
+ return true;
+}
+
+REGISTER_PIPELINE_HANDLER(PipelineHandlerMaliC55, "mali-c55")
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/mali-c55/meson.build b/src/libcamera/pipeline/mali-c55/meson.build
new file mode 100644
index 00000000..30fd29b9
--- /dev/null
+++ b/src/libcamera/pipeline/mali-c55/meson.build
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_sources += files([
+ 'mali-c55.cpp'
+])
diff --git a/src/libcamera/pipeline/meson.build b/src/libcamera/pipeline/meson.build
index 30dc5b97..8a61991c 100644
--- a/src/libcamera/pipeline/meson.build
+++ b/src/libcamera/pipeline/meson.build
@@ -1,5 +1,20 @@
# SPDX-License-Identifier: CC0-1.0
+# Location of pipeline specific configuration files
+pipeline_data_dir = libcamera_datadir / 'pipeline'
+
+# Allow multi-level directory structuring for the pipeline handlers if needed.
+subdirs = []
+
foreach pipeline : pipelines
+ pipeline = pipeline.split('/')[0]
+ if pipeline in subdirs
+ continue
+ endif
+
+ subdirs += pipeline
subdir(pipeline)
+
+ # Don't reuse the pipeline variable below, the subdirectory may have
+ # overwritten it.
endforeach
diff --git a/src/libcamera/pipeline/raspberrypi/dma_heaps.cpp b/src/libcamera/pipeline/raspberrypi/dma_heaps.cpp
deleted file mode 100644
index 69831dab..00000000
--- a/src/libcamera/pipeline/raspberrypi/dma_heaps.cpp
+++ /dev/null
@@ -1,90 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Limited
- *
- * dma_heaps.h - Helper class for dma-heap allocations.
- */
-
-#include "dma_heaps.h"
-
-#include <array>
-#include <fcntl.h>
-#include <linux/dma-buf.h>
-#include <linux/dma-heap.h>
-#include <sys/ioctl.h>
-#include <unistd.h>
-
-#include <libcamera/base/log.h>
-
-/*
- * /dev/dma-heap/linux,cma is the dma-heap allocator, which allows dmaheap-cma
- * to only have to worry about importing.
- *
- * Annoyingly, should the cma heap size be specified on the kernel command line
- * instead of DT, the heap gets named "reserved" instead.
- */
-static constexpr std::array<const char *, 2> heapNames = {
- "/dev/dma_heap/linux,cma",
- "/dev/dma_heap/reserved"
-};
-
-namespace libcamera {
-
-LOG_DECLARE_CATEGORY(RPI)
-
-namespace RPi {
-
-DmaHeap::DmaHeap()
-{
- for (const char *name : heapNames) {
- int ret = ::open(name, O_RDWR, 0);
- if (ret < 0) {
- ret = errno;
- LOG(RPI, Debug) << "Failed to open " << name << ": "
- << strerror(ret);
- continue;
- }
-
- dmaHeapHandle_ = UniqueFD(ret);
- break;
- }
-
- if (!dmaHeapHandle_.isValid())
- LOG(RPI, Error) << "Could not open any dmaHeap device";
-}
-
-DmaHeap::~DmaHeap() = default;
-
-UniqueFD DmaHeap::alloc(const char *name, std::size_t size)
-{
- int ret;
-
- if (!name)
- return {};
-
- struct dma_heap_allocation_data alloc = {};
-
- alloc.len = size;
- alloc.fd_flags = O_CLOEXEC | O_RDWR;
-
- ret = ::ioctl(dmaHeapHandle_.get(), DMA_HEAP_IOCTL_ALLOC, &alloc);
- if (ret < 0) {
- LOG(RPI, Error) << "dmaHeap allocation failure for "
- << name;
- return {};
- }
-
- UniqueFD allocFd(alloc.fd);
- ret = ::ioctl(allocFd.get(), DMA_BUF_SET_NAME, name);
- if (ret < 0) {
- LOG(RPI, Error) << "dmaHeap naming failure for "
- << name;
- return {};
- }
-
- return allocFd;
-}
-
-} /* namespace RPi */
-
-} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/raspberrypi/dma_heaps.h b/src/libcamera/pipeline/raspberrypi/dma_heaps.h
deleted file mode 100644
index d38f41ea..00000000
--- a/src/libcamera/pipeline/raspberrypi/dma_heaps.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Limited
- *
- * dma_heaps.h - Helper class for dma-heap allocations.
- */
-
-#pragma once
-
-#include <stddef.h>
-
-#include <libcamera/base/unique_fd.h>
-
-namespace libcamera {
-
-namespace RPi {
-
-class DmaHeap
-{
-public:
- DmaHeap();
- ~DmaHeap();
- bool isValid() const { return dmaHeapHandle_.isValid(); }
- UniqueFD alloc(const char *name, std::size_t size);
-
-private:
- UniqueFD dmaHeapHandle_;
-};
-
-} /* namespace RPi */
-
-} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
deleted file mode 100644
index 66a84b1d..00000000
--- a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
+++ /dev/null
@@ -1,2200 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019-2021, Raspberry Pi (Trading) Ltd.
- *
- * raspberrypi.cpp - Pipeline handler for Raspberry Pi devices
- */
-#include <algorithm>
-#include <assert.h>
-#include <cmath>
-#include <fcntl.h>
-#include <memory>
-#include <mutex>
-#include <queue>
-#include <unordered_set>
-#include <utility>
-
-#include <libcamera/base/shared_fd.h>
-#include <libcamera/base/utils.h>
-
-#include <libcamera/camera.h>
-#include <libcamera/control_ids.h>
-#include <libcamera/formats.h>
-#include <libcamera/ipa/raspberrypi_ipa_interface.h>
-#include <libcamera/ipa/raspberrypi_ipa_proxy.h>
-#include <libcamera/logging.h>
-#include <libcamera/property_ids.h>
-#include <libcamera/request.h>
-
-#include <linux/bcm2835-isp.h>
-#include <linux/media-bus-format.h>
-#include <linux/videodev2.h>
-
-#include "libcamera/internal/bayer_format.h"
-#include "libcamera/internal/camera.h"
-#include "libcamera/internal/camera_sensor.h"
-#include "libcamera/internal/delayed_controls.h"
-#include "libcamera/internal/device_enumerator.h"
-#include "libcamera/internal/framebuffer.h"
-#include "libcamera/internal/ipa_manager.h"
-#include "libcamera/internal/media_device.h"
-#include "libcamera/internal/pipeline_handler.h"
-#include "libcamera/internal/v4l2_videodevice.h"
-
-#include "dma_heaps.h"
-#include "rpi_stream.h"
-
-using namespace std::chrono_literals;
-
-namespace libcamera {
-
-LOG_DEFINE_CATEGORY(RPI)
-
-namespace {
-
-constexpr unsigned int defaultRawBitDepth = 12;
-
-/* Map of mbus codes to supported sizes reported by the sensor. */
-using SensorFormats = std::map<unsigned int, std::vector<Size>>;
-
-SensorFormats populateSensorFormats(std::unique_ptr<CameraSensor> &sensor)
-{
- SensorFormats formats;
-
- for (auto const mbusCode : sensor->mbusCodes())
- formats.emplace(mbusCode, sensor->sizes(mbusCode));
-
- return formats;
-}
-
-PixelFormat mbusCodeToPixelFormat(unsigned int mbus_code,
- BayerFormat::Packing packingReq)
-{
- BayerFormat bayer = BayerFormat::fromMbusCode(mbus_code);
-
- ASSERT(bayer.isValid());
-
- bayer.packing = packingReq;
- PixelFormat pix = bayer.toPixelFormat();
-
- /*
- * Not all formats (e.g. 8-bit or 16-bit Bayer formats) can have packed
- * variants. So if the PixelFormat returns as invalid, use the non-packed
- * conversion instead.
- */
- if (!pix.isValid()) {
- bayer.packing = BayerFormat::Packing::None;
- pix = bayer.toPixelFormat();
- }
-
- return pix;
-}
-
-V4L2DeviceFormat toV4L2DeviceFormat(const V4L2SubdeviceFormat &format,
- BayerFormat::Packing packingReq)
-{
- const PixelFormat pix = mbusCodeToPixelFormat(format.mbus_code, packingReq);
- V4L2DeviceFormat deviceFormat;
-
- deviceFormat.fourcc = V4L2PixelFormat::fromPixelFormat(pix);
- deviceFormat.size = format.size;
- deviceFormat.colorSpace = format.colorSpace;
- return deviceFormat;
-}
-
-bool isRaw(const PixelFormat &pixFmt)
-{
- /*
-	 * The isRaw test might be redundant right now, as the pipeline handler only
- * supports RAW sensors. Leave it in for now, just as a sanity check.
- */
- if (!pixFmt.isValid())
- return false;
-
- const PixelFormatInfo &info = PixelFormatInfo::info(pixFmt);
- if (!info.isValid())
- return false;
-
- return info.colourEncoding == PixelFormatInfo::ColourEncodingRAW;
-}
-
-double scoreFormat(double desired, double actual)
-{
- double score = desired - actual;
- /* Smaller desired dimensions are preferred. */
- if (score < 0.0)
- score = (-score) / 8;
- /* Penalise non-exact matches. */
- if (actual != desired)
- score *= 2;
-
- return score;
-}
-
-V4L2SubdeviceFormat findBestFormat(const SensorFormats &formatsMap, const Size &req, unsigned int bitDepth)
-{
- double bestScore = std::numeric_limits<double>::max(), score;
- V4L2SubdeviceFormat bestFormat;
- bestFormat.colorSpace = ColorSpace::Raw;
-
- constexpr float penaltyAr = 1500.0;
- constexpr float penaltyBitDepth = 500.0;
-
- /* Calculate the closest/best mode from the user requested size. */
- for (const auto &iter : formatsMap) {
- const unsigned int mbusCode = iter.first;
- const PixelFormat format = mbusCodeToPixelFormat(mbusCode,
- BayerFormat::Packing::None);
- const PixelFormatInfo &info = PixelFormatInfo::info(format);
-
- for (const Size &size : iter.second) {
- double reqAr = static_cast<double>(req.width) / req.height;
- double fmtAr = static_cast<double>(size.width) / size.height;
-
- /* Score the dimensions for closeness. */
- score = scoreFormat(req.width, size.width);
- score += scoreFormat(req.height, size.height);
- score += penaltyAr * scoreFormat(reqAr, fmtAr);
-
- /* Add any penalties... this is not an exact science! */
- score += utils::abs_diff(info.bitsPerPixel, bitDepth) * penaltyBitDepth;
-
- if (score <= bestScore) {
- bestScore = score;
- bestFormat.mbus_code = mbusCode;
- bestFormat.size = size;
- }
-
- LOG(RPI, Debug) << "Format: " << size
- << " fmt " << format
- << " Score: " << score
- << " (best " << bestScore << ")";
- }
- }
-
- return bestFormat;
-}
-
-enum class Unicam : unsigned int { Image, Embedded };
-enum class Isp : unsigned int { Input, Output0, Output1, Stats };
-
-} /* namespace */
-
-class RPiCameraData : public Camera::Private
-{
-public:
- RPiCameraData(PipelineHandler *pipe)
- : Camera::Private(pipe), state_(State::Stopped),
- supportsFlips_(false), flipsAlterBayerOrder_(false),
- dropFrameCount_(0), buffersAllocated_(false), ispOutputCount_(0)
- {
- }
-
- ~RPiCameraData()
- {
- freeBuffers();
- }
-
- void freeBuffers();
- void frameStarted(uint32_t sequence);
-
- int loadIPA(ipa::RPi::IPAInitResult *result);
- int configureIPA(const CameraConfiguration *config, ipa::RPi::IPAConfigResult *result);
-
- void enumerateVideoDevices(MediaLink *link);
-
- void statsMetadataComplete(uint32_t bufferId, const ControlList &controls);
- void runIsp(uint32_t bufferId);
- void embeddedComplete(uint32_t bufferId);
- void setIspControls(const ControlList &controls);
- void setDelayedControls(const ControlList &controls);
- void setSensorControls(ControlList &controls);
- void unicamTimeout();
-
- /* bufferComplete signal handlers. */
- void unicamBufferDequeue(FrameBuffer *buffer);
- void ispInputDequeue(FrameBuffer *buffer);
- void ispOutputDequeue(FrameBuffer *buffer);
-
- void clearIncompleteRequests();
- void handleStreamBuffer(FrameBuffer *buffer, RPi::Stream *stream);
- void handleExternalBuffer(FrameBuffer *buffer, RPi::Stream *stream);
- void handleState();
- Rectangle scaleIspCrop(const Rectangle &ispCrop) const;
- void applyScalerCrop(const ControlList &controls);
-
- std::unique_ptr<ipa::RPi::IPAProxyRPi> ipa_;
-
- std::unique_ptr<CameraSensor> sensor_;
- SensorFormats sensorFormats_;
- /* Array of Unicam and ISP device streams and associated buffers/streams. */
- RPi::Device<Unicam, 2> unicam_;
- RPi::Device<Isp, 4> isp_;
- /* The vector below is just for convenience when iterating over all streams. */
- std::vector<RPi::Stream *> streams_;
- /* Stores the ids of the buffers mapped in the IPA. */
- std::unordered_set<unsigned int> ipaBuffers_;
- /*
- * Stores a cascade of Video Mux or Bridge devices between the sensor and
- * Unicam together with media link across the entities.
- */
- std::vector<std::pair<std::unique_ptr<V4L2Subdevice>, MediaLink *>> bridgeDevices_;
-
- /* DMAHEAP allocation helper. */
- RPi::DmaHeap dmaHeap_;
- SharedFD lsTable_;
-
- std::unique_ptr<DelayedControls> delayedCtrls_;
- bool sensorMetadata_;
-
- /*
- * All the functions in this class are called from a single calling
- * thread. So, we do not need to have any mutex to protect access to any
- * of the variables below.
- */
- enum class State { Stopped, Idle, Busy, IpaComplete };
- State state_;
-
- struct BayerFrame {
- FrameBuffer *buffer;
- ControlList controls;
- };
-
- std::queue<BayerFrame> bayerQueue_;
- std::queue<FrameBuffer *> embeddedQueue_;
- std::deque<Request *> requestQueue_;
-
- /*
- * Manage horizontal and vertical flips supported (or not) by the
- * sensor. Also store the "native" Bayer order (that is, with no
- * transforms applied).
- */
- bool supportsFlips_;
- bool flipsAlterBayerOrder_;
- BayerFormat::Order nativeBayerOrder_;
-
- /* For handling digital zoom. */
- IPACameraSensorInfo sensorInfo_;
- Rectangle ispCrop_; /* crop in ISP (camera mode) pixels */
- Rectangle scalerCrop_; /* crop in sensor native pixels */
- Size ispMinCropSize_;
-
- unsigned int dropFrameCount_;
-
- /*
-	 * If set, this stores the value that represents a gain of one for
- * the V4L2_CID_NOTIFY_GAINS control.
- */
- std::optional<int32_t> notifyGainsUnity_;
-
- /* Have internal buffers been allocated? */
- bool buffersAllocated_;
-
-private:
- void checkRequestCompleted();
- void fillRequestMetadata(const ControlList &bufferControls,
- Request *request);
- void tryRunPipeline();
- bool findMatchingBuffers(BayerFrame &bayerFrame, FrameBuffer *&embeddedBuffer);
-
- unsigned int ispOutputCount_;
-};
-
-class RPiCameraConfiguration : public CameraConfiguration
-{
-public:
- RPiCameraConfiguration(const RPiCameraData *data);
-
- Status validate() override;
-
- /* Cache the combinedTransform_ that will be applied to the sensor */
- Transform combinedTransform_;
-
-private:
- const RPiCameraData *data_;
-};
-
-class PipelineHandlerRPi : public PipelineHandler
-{
-public:
- PipelineHandlerRPi(CameraManager *manager);
-
- CameraConfiguration *generateConfiguration(Camera *camera, const StreamRoles &roles) override;
- int configure(Camera *camera, CameraConfiguration *config) override;
-
- int exportFrameBuffers(Camera *camera, Stream *stream,
- std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
-
- int start(Camera *camera, const ControlList *controls) override;
- void stopDevice(Camera *camera) override;
-
- int queueRequestDevice(Camera *camera, Request *request) override;
-
- bool match(DeviceEnumerator *enumerator) override;
-
-private:
- RPiCameraData *cameraData(Camera *camera)
- {
- return static_cast<RPiCameraData *>(camera->_d());
- }
-
- int registerCamera(MediaDevice *unicam, MediaDevice *isp, MediaEntity *sensorEntity);
- int queueAllBuffers(Camera *camera);
- int prepareBuffers(Camera *camera);
- void mapBuffers(Camera *camera, const RPi::BufferMap &buffers, unsigned int mask);
-};
-
-RPiCameraConfiguration::RPiCameraConfiguration(const RPiCameraData *data)
- : CameraConfiguration(), data_(data)
-{
-}
-
-CameraConfiguration::Status RPiCameraConfiguration::validate()
-{
- Status status = Valid;
-
- if (config_.empty())
- return Invalid;
-
- status = validateColorSpaces(ColorSpaceFlag::StreamsShareColorSpace);
-
- /*
- * What if the platform has a non-90 degree rotation? We can't even
- * "adjust" the configuration and carry on. Alternatively, raising an
- * error means the platform can never run. Let's just print a warning
- * and continue regardless; the rotation is effectively set to zero.
- */
- int32_t rotation = data_->sensor_->properties().get(properties::Rotation);
- bool success;
- Transform rotationTransform = transformFromRotation(rotation, &success);
- if (!success)
- LOG(RPI, Warning) << "Invalid rotation of " << rotation
- << " degrees - ignoring";
- Transform combined = transform * rotationTransform;
-
- /*
- * We combine the platform and user transform, but must "adjust away"
- * any combined result that includes a transform, as we can't do those.
- * In this case, flipping only the transpose bit is helpful to
- * applications - they either get the transform they requested, or have
- * to do a simple transpose themselves (they don't have to worry about
- * the other possible cases).
- */
- if (!!(combined & Transform::Transpose)) {
- /*
- * Flipping the transpose bit in "transform" flips it in the
- * combined result too (as it's the last thing that happens),
- * which is of course clearing it.
- */
- transform ^= Transform::Transpose;
- combined &= ~Transform::Transpose;
- status = Adjusted;
- }
-
- /*
- * We also check if the sensor doesn't do h/vflips at all, in which
- * case we clear them, and the application will have to do everything.
- */
- if (!data_->supportsFlips_ && !!combined) {
- /*
- * If the sensor can do no transforms, then combined must be
- * changed to the identity. The only user transform that gives
-		 * rise to this is the inverse of the rotation. (Recall that
- * combined = transform * rotationTransform.)
- */
- transform = -rotationTransform;
- combined = Transform::Identity;
- status = Adjusted;
- }
-
- /*
- * Store the final combined transform that configure() will need to
- * apply to the sensor to save us working it out again.
- */
- combinedTransform_ = combined;
-
- unsigned int rawCount = 0, outCount = 0, count = 0, maxIndex = 0;
- std::pair<int, Size> outSize[2];
- Size maxSize;
- for (StreamConfiguration &cfg : config_) {
- if (isRaw(cfg.pixelFormat)) {
- /*
- * Calculate the best sensor mode we can use based on
- * the user request.
- */
- const PixelFormatInfo &info = PixelFormatInfo::info(cfg.pixelFormat);
- unsigned int bitDepth = info.isValid() ? info.bitsPerPixel : defaultRawBitDepth;
- V4L2SubdeviceFormat sensorFormat = findBestFormat(data_->sensorFormats_, cfg.size, bitDepth);
- BayerFormat::Packing packing = BayerFormat::Packing::CSI2;
- if (info.isValid() && !info.packed)
- packing = BayerFormat::Packing::None;
- V4L2DeviceFormat unicamFormat = toV4L2DeviceFormat(sensorFormat,
- packing);
- int ret = data_->unicam_[Unicam::Image].dev()->tryFormat(&unicamFormat);
- if (ret)
- return Invalid;
-
- /*
- * Some sensors change their Bayer order when they are
- * h-flipped or v-flipped, according to the transform.
- * If this one does, we must advertise the transformed
- * Bayer order in the raw stream. Note how we must
- * fetch the "native" (i.e. untransformed) Bayer order,
- * because the sensor may currently be flipped!
- */
- V4L2PixelFormat fourcc = unicamFormat.fourcc;
- if (data_->flipsAlterBayerOrder_) {
- BayerFormat bayer = BayerFormat::fromV4L2PixelFormat(fourcc);
- bayer.order = data_->nativeBayerOrder_;
- bayer = bayer.transform(combined);
- fourcc = bayer.toV4L2PixelFormat();
- }
-
- PixelFormat unicamPixFormat = fourcc.toPixelFormat();
- if (cfg.size != unicamFormat.size ||
- cfg.pixelFormat != unicamPixFormat) {
- cfg.size = unicamFormat.size;
- cfg.pixelFormat = unicamPixFormat;
- status = Adjusted;
- }
-
- cfg.stride = unicamFormat.planes[0].bpl;
- cfg.frameSize = unicamFormat.planes[0].size;
-
- rawCount++;
- } else {
- outSize[outCount] = std::make_pair(count, cfg.size);
- /* Record the largest resolution for fixups later. */
- if (maxSize < cfg.size) {
- maxSize = cfg.size;
- maxIndex = outCount;
- }
- outCount++;
- }
-
- count++;
-
- /* Can only output 1 RAW stream, or 2 YUV/RGB streams. */
- if (rawCount > 1 || outCount > 2) {
- LOG(RPI, Error) << "Invalid number of streams requested";
- return Invalid;
- }
- }
-
- /*
- * Now do any fixups needed. For the two ISP outputs, one stream must be
- * equal or smaller than the other in all dimensions.
- */
- for (unsigned int i = 0; i < outCount; i++) {
- outSize[i].second.width = std::min(outSize[i].second.width,
- maxSize.width);
- outSize[i].second.height = std::min(outSize[i].second.height,
- maxSize.height);
-
- if (config_.at(outSize[i].first).size != outSize[i].second) {
- config_.at(outSize[i].first).size = outSize[i].second;
- status = Adjusted;
- }
-
- /*
- * Also validate the correct pixel formats here.
- * Note that Output0 and Output1 support a different
- * set of formats.
- *
- * Output 0 must be for the largest resolution. We will
- * have that fixed up in the code above.
- *
- */
- StreamConfiguration &cfg = config_.at(outSize[i].first);
- PixelFormat &cfgPixFmt = cfg.pixelFormat;
- V4L2VideoDevice *dev;
-
- if (i == maxIndex)
- dev = data_->isp_[Isp::Output0].dev();
- else
- dev = data_->isp_[Isp::Output1].dev();
-
- V4L2VideoDevice::Formats fmts = dev->formats();
-
- if (fmts.find(V4L2PixelFormat::fromPixelFormat(cfgPixFmt)) == fmts.end()) {
- /* If we cannot find a native format, use a default one. */
- cfgPixFmt = formats::NV12;
- status = Adjusted;
- }
-
- V4L2DeviceFormat format;
- format.fourcc = V4L2PixelFormat::fromPixelFormat(cfg.pixelFormat);
- format.size = cfg.size;
- format.colorSpace = cfg.colorSpace;
-
- LOG(RPI, Debug)
- << "Try color space " << ColorSpace::toString(cfg.colorSpace);
-
- int ret = dev->tryFormat(&format);
- if (ret)
- return Invalid;
-
- if (cfg.colorSpace != format.colorSpace) {
- status = Adjusted;
- LOG(RPI, Debug)
- << "Color space changed from "
- << ColorSpace::toString(cfg.colorSpace) << " to "
- << ColorSpace::toString(format.colorSpace);
- }
-
- cfg.colorSpace = format.colorSpace;
-
- cfg.stride = format.planes[0].bpl;
- cfg.frameSize = format.planes[0].size;
-
- }
-
- return status;
-}
-
-PipelineHandlerRPi::PipelineHandlerRPi(CameraManager *manager)
- : PipelineHandler(manager)
-{
-}
-
-CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
- const StreamRoles &roles)
-{
- RPiCameraData *data = cameraData(camera);
- CameraConfiguration *config = new RPiCameraConfiguration(data);
- V4L2SubdeviceFormat sensorFormat;
- unsigned int bufferCount;
- PixelFormat pixelFormat;
- V4L2VideoDevice::Formats fmts;
- Size size;
- std::optional<ColorSpace> colorSpace;
-
- if (roles.empty())
- return config;
-
- unsigned int rawCount = 0;
- unsigned int outCount = 0;
- Size sensorSize = data->sensor_->resolution();
- for (const StreamRole role : roles) {
- switch (role) {
- case StreamRole::Raw:
- size = sensorSize;
- sensorFormat = findBestFormat(data->sensorFormats_, size, defaultRawBitDepth);
- pixelFormat = mbusCodeToPixelFormat(sensorFormat.mbus_code,
- BayerFormat::Packing::CSI2);
- ASSERT(pixelFormat.isValid());
- colorSpace = ColorSpace::Raw;
- bufferCount = 2;
- rawCount++;
- break;
-
- case StreamRole::StillCapture:
- fmts = data->isp_[Isp::Output0].dev()->formats();
- pixelFormat = formats::NV12;
- /*
- * Still image codecs usually expect the JPEG color space.
- * Even RGB codecs will be fine as the RGB we get with the
- * JPEG color space is the same as sRGB.
- */
- colorSpace = ColorSpace::Jpeg;
- /* Return the largest sensor resolution. */
- size = sensorSize;
- bufferCount = 1;
- outCount++;
- break;
-
- case StreamRole::VideoRecording:
- /*
- * The colour denoise algorithm requires the analysis
- * image, produced by the second ISP output, to be in
- * YUV420 format. Select this format as the default, to
- * maximize chances that it will be picked by
- * applications and enable usage of the colour denoise
- * algorithm.
- */
- fmts = data->isp_[Isp::Output0].dev()->formats();
- pixelFormat = formats::YUV420;
- /*
- * Choose a color space appropriate for video recording.
- * Rec.709 will be a good default for HD resolutions.
- */
- colorSpace = ColorSpace::Rec709;
- size = { 1920, 1080 };
- bufferCount = 4;
- outCount++;
- break;
-
- case StreamRole::Viewfinder:
- fmts = data->isp_[Isp::Output0].dev()->formats();
- pixelFormat = formats::ARGB8888;
- colorSpace = ColorSpace::Jpeg;
- size = { 800, 600 };
- bufferCount = 4;
- outCount++;
- break;
-
- default:
- LOG(RPI, Error) << "Requested stream role not supported: "
- << role;
- delete config;
- return nullptr;
- }
-
- if (rawCount > 1 || outCount > 2) {
- LOG(RPI, Error) << "Invalid stream roles requested";
- delete config;
- return nullptr;
- }
-
- std::map<PixelFormat, std::vector<SizeRange>> deviceFormats;
- if (role == StreamRole::Raw) {
- /* Translate the MBUS codes to a PixelFormat. */
- for (const auto &format : data->sensorFormats_) {
- PixelFormat pf = mbusCodeToPixelFormat(format.first,
- BayerFormat::Packing::CSI2);
- if (pf.isValid())
- deviceFormats.emplace(std::piecewise_construct, std::forward_as_tuple(pf),
- std::forward_as_tuple(format.second.begin(), format.second.end()));
- }
- } else {
- /*
- * Translate the V4L2PixelFormat to PixelFormat. Note that we
- * limit the recommended largest ISP output size to match the
- * sensor resolution.
- */
- for (const auto &format : fmts) {
- PixelFormat pf = format.first.toPixelFormat();
- if (pf.isValid()) {
- const SizeRange &ispSizes = format.second[0];
- deviceFormats[pf].emplace_back(ispSizes.min, sensorSize,
- ispSizes.hStep, ispSizes.vStep);
- }
- }
- }
-
- /* Add the stream format based on the device node used for the use case. */
- StreamFormats formats(deviceFormats);
- StreamConfiguration cfg(formats);
- cfg.size = size;
- cfg.pixelFormat = pixelFormat;
- cfg.colorSpace = colorSpace;
- cfg.bufferCount = bufferCount;
- config->addConfiguration(cfg);
- }
-
- config->validate();
-
- return config;
-}
-
-int PipelineHandlerRPi::configure(Camera *camera, CameraConfiguration *config)
-{
- RPiCameraData *data = cameraData(camera);
- int ret;
-
- /* Start by freeing all buffers and reset the Unicam and ISP stream states. */
- data->freeBuffers();
- for (auto const stream : data->streams_)
- stream->setExternal(false);
-
- BayerFormat::Packing packing = BayerFormat::Packing::CSI2;
- Size maxSize, sensorSize;
- unsigned int maxIndex = 0;
- bool rawStream = false;
- unsigned int bitDepth = defaultRawBitDepth;
-
- /*
- * Look for the RAW stream (if given) size as well as the largest
- * ISP output size.
- */
- for (unsigned i = 0; i < config->size(); i++) {
- StreamConfiguration &cfg = config->at(i);
-
- if (isRaw(cfg.pixelFormat)) {
- /*
- * If we have been given a RAW stream, use that size
- * for setting up the sensor.
- */
- sensorSize = cfg.size;
- rawStream = true;
- /* Check if the user has explicitly set an unpacked format. */
- BayerFormat bayerFormat = BayerFormat::fromPixelFormat(cfg.pixelFormat);
- packing = bayerFormat.packing;
- bitDepth = bayerFormat.bitDepth;
- } else {
- if (cfg.size > maxSize) {
- maxSize = config->at(i).size;
- maxIndex = i;
- }
- }
- }
-
- /*
- * Configure the H/V flip controls based on the combination of
- * the sensor and user transform.
- */
- if (data->supportsFlips_) {
- const RPiCameraConfiguration *rpiConfig =
- static_cast<const RPiCameraConfiguration *>(config);
- ControlList controls;
-
- controls.set(V4L2_CID_HFLIP,
- static_cast<int32_t>(!!(rpiConfig->combinedTransform_ & Transform::HFlip)));
- controls.set(V4L2_CID_VFLIP,
- static_cast<int32_t>(!!(rpiConfig->combinedTransform_ & Transform::VFlip)));
- data->setSensorControls(controls);
- }
-
- /* First calculate the best sensor mode we can use based on the user request. */
- V4L2SubdeviceFormat sensorFormat = findBestFormat(data->sensorFormats_, rawStream ? sensorSize : maxSize, bitDepth);
- ret = data->sensor_->setFormat(&sensorFormat);
- if (ret)
- return ret;
-
- V4L2DeviceFormat unicamFormat = toV4L2DeviceFormat(sensorFormat, packing);
- ret = data->unicam_[Unicam::Image].dev()->setFormat(&unicamFormat);
- if (ret)
- return ret;
-
- LOG(RPI, Info) << "Sensor: " << camera->id()
- << " - Selected sensor format: " << sensorFormat
- << " - Selected unicam format: " << unicamFormat;
-
- ret = data->isp_[Isp::Input].dev()->setFormat(&unicamFormat);
- if (ret)
- return ret;
-
- /*
- * See which streams are requested, and route the user
- * StreamConfiguration appropriately.
- */
- V4L2DeviceFormat format;
- bool output0Set = false, output1Set = false;
- for (unsigned i = 0; i < config->size(); i++) {
- StreamConfiguration &cfg = config->at(i);
-
- if (isRaw(cfg.pixelFormat)) {
- cfg.setStream(&data->unicam_[Unicam::Image]);
- data->unicam_[Unicam::Image].setExternal(true);
- continue;
- }
-
- /* The largest resolution gets routed to the ISP Output 0 node. */
- RPi::Stream *stream = i == maxIndex ? &data->isp_[Isp::Output0]
- : &data->isp_[Isp::Output1];
-
- V4L2PixelFormat fourcc = V4L2PixelFormat::fromPixelFormat(cfg.pixelFormat);
- format.size = cfg.size;
- format.fourcc = fourcc;
- format.colorSpace = cfg.colorSpace;
-
- LOG(RPI, Debug) << "Setting " << stream->name() << " to "
- << format;
-
- ret = stream->dev()->setFormat(&format);
- if (ret)
- return -EINVAL;
-
- if (format.size != cfg.size || format.fourcc != fourcc) {
- LOG(RPI, Error)
- << "Failed to set requested format on " << stream->name()
- << ", returned " << format;
- return -EINVAL;
- }
-
- LOG(RPI, Debug)
- << "Stream " << stream->name() << " has color space "
- << ColorSpace::toString(cfg.colorSpace);
-
- cfg.setStream(stream);
- stream->setExternal(true);
-
- if (i != maxIndex)
- output1Set = true;
- else
- output0Set = true;
- }
-
- /*
- * If ISP::Output0 stream has not been configured by the application,
- * we must allow the hardware to generate an output so that the data
- * flow in the pipeline handler remains consistent, and we still generate
- * statistics for the IPA to use. So enable the output at a very low
- * resolution for internal use.
- *
- * \todo Allow the pipeline to work correctly without Output0 and only
- * statistics coming from the hardware.
- */
- if (!output0Set) {
- maxSize = Size(320, 240);
- format = {};
- format.size = maxSize;
- format.fourcc = V4L2PixelFormat::fromPixelFormat(formats::YUV420);
- /* No one asked for output, so the color space doesn't matter. */
- format.colorSpace = ColorSpace::Jpeg;
- ret = data->isp_[Isp::Output0].dev()->setFormat(&format);
- if (ret) {
- LOG(RPI, Error)
- << "Failed to set default format on ISP Output0: "
- << ret;
- return -EINVAL;
- }
-
- LOG(RPI, Debug) << "Defaulting ISP Output0 format to "
- << format;
- }
-
- /*
- * If ISP::Output1 stream has not been requested by the application, we
- * set it up for internal use now. This second stream will be used for
- * fast colour denoise, and must be a quarter resolution of the ISP::Output0
- * stream. However, also limit the maximum size to 1200 pixels in the
- * larger dimension, just to avoid being wasteful with buffer allocations
- * and memory bandwidth.
- *
- * \todo If Output 1 format is not YUV420, Output 1 ought to be disabled as
- * colour denoise will not run.
- */
- if (!output1Set) {
- V4L2DeviceFormat output1Format;
- constexpr Size maxDimensions(1200, 1200);
- const Size limit = maxDimensions.boundedToAspectRatio(format.size);
-
- output1Format.size = (format.size / 2).boundedTo(limit).alignedDownTo(2, 2);
- output1Format.colorSpace = format.colorSpace;
- output1Format.fourcc = V4L2PixelFormat::fromPixelFormat(formats::YUV420);
-
- LOG(RPI, Debug) << "Setting ISP Output1 (internal) to "
- << output1Format;
-
- ret = data->isp_[Isp::Output1].dev()->setFormat(&output1Format);
- if (ret) {
- LOG(RPI, Error) << "Failed to set format on ISP Output1: "
- << ret;
- return -EINVAL;
- }
- }
-
- /* ISP statistics output format. */
- format = {};
- format.fourcc = V4L2PixelFormat(V4L2_META_FMT_BCM2835_ISP_STATS);
- ret = data->isp_[Isp::Stats].dev()->setFormat(&format);
- if (ret) {
- LOG(RPI, Error) << "Failed to set format on ISP stats stream: "
- << format;
- return ret;
- }
-
- /* Figure out the smallest selection the ISP will allow. */
- Rectangle testCrop(0, 0, 1, 1);
- data->isp_[Isp::Input].dev()->setSelection(V4L2_SEL_TGT_CROP, &testCrop);
- data->ispMinCropSize_ = testCrop.size();
-
- /* Adjust aspect ratio by providing crops on the input image. */
- Size size = unicamFormat.size.boundedToAspectRatio(maxSize);
- Rectangle crop = size.centeredTo(Rectangle(unicamFormat.size).center());
- data->ispCrop_ = crop;
-
- data->isp_[Isp::Input].dev()->setSelection(V4L2_SEL_TGT_CROP, &crop);
-
- ipa::RPi::IPAConfigResult result;
- ret = data->configureIPA(config, &result);
- if (ret)
- LOG(RPI, Error) << "Failed to configure the IPA: " << ret;
-
- /*
- * Set the scaler crop to the value we are using (scaled to native sensor
- * coordinates).
- */
- data->scalerCrop_ = data->scaleIspCrop(data->ispCrop_);
-
- /*
- * Configure the Unicam embedded data output format only if the sensor
- * supports it.
- */
- if (data->sensorMetadata_) {
- V4L2SubdeviceFormat embeddedFormat;
-
- data->sensor_->device()->getFormat(1, &embeddedFormat);
- format.fourcc = V4L2PixelFormat(V4L2_META_FMT_SENSOR_DATA);
- format.planes[0].size = embeddedFormat.size.width * embeddedFormat.size.height;
-
- LOG(RPI, Debug) << "Setting embedded data format.";
- ret = data->unicam_[Unicam::Embedded].dev()->setFormat(&format);
- if (ret) {
- LOG(RPI, Error) << "Failed to set format on Unicam embedded: "
- << format;
- return ret;
- }
- }
-
- /*
- * Update the ScalerCropMaximum to the correct value for this camera mode.
- * For us, it's the same as the "analogue crop".
- *
- * \todo Make this property the ScalerCrop maximum value when dynamic
- * controls are available and set it at validate() time
- */
- data->properties_.set(properties::ScalerCropMaximum, data->sensorInfo_.analogCrop);
-
- /* Store the mode sensitivity for the application. */
- data->properties_.set(properties::SensorSensitivity, result.modeSensitivity);
-
- /* Update the controls that the Raspberry Pi IPA can handle. */
- ControlInfoMap::Map ctrlMap;
- for (auto const &c : result.controlInfo)
- ctrlMap.emplace(c.first, c.second);
-
- /* Add the ScalerCrop control limits based on the current mode. */
- ctrlMap.emplace(&controls::ScalerCrop,
- ControlInfo(Rectangle(data->ispMinCropSize_), Rectangle(data->sensorInfo_.outputSize)));
-
- data->controlInfo_ = ControlInfoMap(std::move(ctrlMap), result.controlInfo.idmap());
-
- /* Setup the Video Mux/Bridge entities. */
- for (auto &[device, link] : data->bridgeDevices_) {
- /*
- * Start by disabling all the sink pad links on the devices in the
- * cascade, with the exception of the link connecting the device.
- */
- for (const MediaPad *p : device->entity()->pads()) {
- if (!(p->flags() & MEDIA_PAD_FL_SINK))
- continue;
-
- for (MediaLink *l : p->links()) {
- if (l != link)
- l->setEnabled(false);
- }
- }
-
- /*
- * Next, enable the entity -> entity links, and setup the pad format.
- *
-		 * \todo Some bridge devices may change the media bus code, so we
- * ought to read the source pad format and propagate it to the sink pad.
- */
- link->setEnabled(true);
- const MediaPad *sinkPad = link->sink();
- ret = device->setFormat(sinkPad->index(), &sensorFormat);
- if (ret) {
- LOG(RPI, Error) << "Failed to set format on " << device->entity()->name()
- << " pad " << sinkPad->index()
- << " with format " << format
- << ": " << ret;
- return ret;
- }
-
- LOG(RPI, Debug) << "Configured media link on device " << device->entity()->name()
- << " on pad " << sinkPad->index();
- }
-
- return ret;
-}
-
-int PipelineHandlerRPi::exportFrameBuffers([[maybe_unused]] Camera *camera, Stream *stream,
- std::vector<std::unique_ptr<FrameBuffer>> *buffers)
-{
- RPi::Stream *s = static_cast<RPi::Stream *>(stream);
- unsigned int count = stream->configuration().bufferCount;
- int ret = s->dev()->exportBuffers(count, buffers);
-
- s->setExportedBuffers(buffers);
-
- return ret;
-}
-
-int PipelineHandlerRPi::start(Camera *camera, const ControlList *controls)
-{
- RPiCameraData *data = cameraData(camera);
- int ret;
-
- for (auto const stream : data->streams_)
- stream->resetBuffers();
-
- if (!data->buffersAllocated_) {
- /* Allocate buffers for internal pipeline usage. */
- ret = prepareBuffers(camera);
- if (ret) {
- LOG(RPI, Error) << "Failed to allocate buffers";
- data->freeBuffers();
- stop(camera);
- return ret;
- }
- data->buffersAllocated_ = true;
- }
-
- /* Check if a ScalerCrop control was specified. */
- if (controls)
- data->applyScalerCrop(*controls);
-
- /* Start the IPA. */
- ipa::RPi::StartConfig startConfig;
- data->ipa_->start(controls ? *controls : ControlList{ controls::controls },
- &startConfig);
-
- /* Apply any gain/exposure settings that the IPA may have passed back. */
- if (!startConfig.controls.empty())
- data->setSensorControls(startConfig.controls);
-
- /* Configure the number of dropped frames required on startup. */
- data->dropFrameCount_ = startConfig.dropFrameCount;
-
- /* We need to set the dropFrameCount_ before queueing buffers. */
- ret = queueAllBuffers(camera);
- if (ret) {
- LOG(RPI, Error) << "Failed to queue buffers";
- stop(camera);
- return ret;
- }
-
- /* Enable SOF event generation. */
- data->unicam_[Unicam::Image].dev()->setFrameStartEnabled(true);
-
- /*
- * Reset the delayed controls with the gain and exposure values set by
- * the IPA.
- */
- data->delayedCtrls_->reset();
-
- data->state_ = RPiCameraData::State::Idle;
-
- /* Start all streams. */
- for (auto const stream : data->streams_) {
- ret = stream->dev()->streamOn();
- if (ret) {
- stop(camera);
- return ret;
- }
- }
-
- /*
- * Set the dequeue timeout to the larger of 2x the maximum possible
- * frame duration or 1 second.
- */
- utils::Duration timeout =
- std::max<utils::Duration>(1s, 2 * startConfig.maxSensorFrameLengthMs * 1ms);
- data->unicam_[Unicam::Image].dev()->setDequeueTimeout(timeout);
-
- return 0;
-}
-
-void PipelineHandlerRPi::stopDevice(Camera *camera)
-{
- RPiCameraData *data = cameraData(camera);
-
- data->state_ = RPiCameraData::State::Stopped;
-
- /* Disable SOF event generation. */
- data->unicam_[Unicam::Image].dev()->setFrameStartEnabled(false);
-
- for (auto const stream : data->streams_)
- stream->dev()->streamOff();
-
- data->clearIncompleteRequests();
- data->bayerQueue_ = {};
- data->embeddedQueue_ = {};
-
- /* Stop the IPA. */
- data->ipa_->stop();
-}
-
-int PipelineHandlerRPi::queueRequestDevice(Camera *camera, Request *request)
-{
- RPiCameraData *data = cameraData(camera);
-
- if (data->state_ == RPiCameraData::State::Stopped)
- return -EINVAL;
-
- LOG(RPI, Debug) << "queueRequestDevice: New request.";
-
- /* Push all buffers supplied in the Request to the respective streams. */
- for (auto stream : data->streams_) {
- if (!stream->isExternal())
- continue;
-
- FrameBuffer *buffer = request->findBuffer(stream);
- if (buffer && stream->getBufferId(buffer) == -1) {
- /*
- * This buffer is not recognised, so it must have been allocated
- * outside the v4l2 device. Store it in the stream buffer list
- * so we can track it.
- */
- stream->setExternalBuffer(buffer);
- }
- /*
- * If no buffer is provided by the request for this stream, we
- * queue a nullptr to the stream to signify that it must use an
- * internally allocated buffer for this capture request. This
- * buffer will not be given back to the application, but is used
- * to support the internal pipeline flow.
- *
- * The below queueBuffer() call will do nothing if there are not
- * enough internal buffers allocated, but this will be handled by
- * queuing the request for buffers in the RPiStream object.
- */
- int ret = stream->queueBuffer(buffer);
- if (ret)
- return ret;
- }
-
- /* Push the request to the back of the queue. */
- data->requestQueue_.push_back(request);
- data->handleState();
-
- return 0;
-}
-
-bool PipelineHandlerRPi::match(DeviceEnumerator *enumerator)
-{
- DeviceMatch unicam("unicam");
- MediaDevice *unicamDevice = acquireMediaDevice(enumerator, unicam);
-
- if (!unicamDevice) {
- LOG(RPI, Debug) << "Unable to acquire a Unicam instance";
- return false;
- }
-
- DeviceMatch isp("bcm2835-isp");
- MediaDevice *ispDevice = acquireMediaDevice(enumerator, isp);
-
- if (!ispDevice) {
- LOG(RPI, Debug) << "Unable to acquire ISP instance";
- return false;
- }
-
- /*
- * The loop below is used to register multiple cameras behind one or more
- * video mux devices that are attached to a particular Unicam instance.
- * Obviously these cameras cannot be used simultaneously.
- */
- unsigned int numCameras = 0;
- for (MediaEntity *entity : unicamDevice->entities()) {
- if (entity->function() != MEDIA_ENT_F_CAM_SENSOR)
- continue;
-
- int ret = registerCamera(unicamDevice, ispDevice, entity);
- if (ret)
- LOG(RPI, Error) << "Failed to register camera "
- << entity->name() << ": " << ret;
- else
- numCameras++;
- }
-
- return !!numCameras;
-}
-
-int PipelineHandlerRPi::registerCamera(MediaDevice *unicam, MediaDevice *isp, MediaEntity *sensorEntity)
-{
- std::unique_ptr<RPiCameraData> data = std::make_unique<RPiCameraData>(this);
-
- if (!data->dmaHeap_.isValid())
- return -ENOMEM;
-
- MediaEntity *unicamImage = unicam->getEntityByName("unicam-image");
- MediaEntity *ispOutput0 = isp->getEntityByName("bcm2835-isp0-output0");
- MediaEntity *ispCapture1 = isp->getEntityByName("bcm2835-isp0-capture1");
- MediaEntity *ispCapture2 = isp->getEntityByName("bcm2835-isp0-capture2");
- MediaEntity *ispCapture3 = isp->getEntityByName("bcm2835-isp0-capture3");
-
- if (!unicamImage || !ispOutput0 || !ispCapture1 || !ispCapture2 || !ispCapture3)
- return -ENOENT;
-
- /* Locate and open the unicam video streams. */
- data->unicam_[Unicam::Image] = RPi::Stream("Unicam Image", unicamImage);
-
- /* An embedded data node will not be present if the sensor does not support it. */
- MediaEntity *unicamEmbedded = unicam->getEntityByName("unicam-embedded");
- if (unicamEmbedded) {
- data->unicam_[Unicam::Embedded] = RPi::Stream("Unicam Embedded", unicamEmbedded);
- data->unicam_[Unicam::Embedded].dev()->bufferReady.connect(data.get(),
- &RPiCameraData::unicamBufferDequeue);
- }
-
- /* Tag the ISP input stream as an import stream. */
- data->isp_[Isp::Input] = RPi::Stream("ISP Input", ispOutput0, true);
- data->isp_[Isp::Output0] = RPi::Stream("ISP Output0", ispCapture1);
- data->isp_[Isp::Output1] = RPi::Stream("ISP Output1", ispCapture2);
- data->isp_[Isp::Stats] = RPi::Stream("ISP Stats", ispCapture3);
-
- /* Wire up all the buffer connections. */
- data->unicam_[Unicam::Image].dev()->dequeueTimeout.connect(data.get(), &RPiCameraData::unicamTimeout);
- data->unicam_[Unicam::Image].dev()->frameStart.connect(data.get(), &RPiCameraData::frameStarted);
- data->unicam_[Unicam::Image].dev()->bufferReady.connect(data.get(), &RPiCameraData::unicamBufferDequeue);
- data->isp_[Isp::Input].dev()->bufferReady.connect(data.get(), &RPiCameraData::ispInputDequeue);
- data->isp_[Isp::Output0].dev()->bufferReady.connect(data.get(), &RPiCameraData::ispOutputDequeue);
- data->isp_[Isp::Output1].dev()->bufferReady.connect(data.get(), &RPiCameraData::ispOutputDequeue);
- data->isp_[Isp::Stats].dev()->bufferReady.connect(data.get(), &RPiCameraData::ispOutputDequeue);
-
- data->sensor_ = std::make_unique<CameraSensor>(sensorEntity);
- if (!data->sensor_)
- return -EINVAL;
-
- if (data->sensor_->init())
- return -EINVAL;
-
- /*
- * Enumerate all the Video Mux/Bridge devices across the sensor -> unicam
- * chain. There may be a cascade of devices in this chain!
- */
- MediaLink *link = sensorEntity->getPadByIndex(0)->links()[0];
- data->enumerateVideoDevices(link);
-
- data->sensorFormats_ = populateSensorFormats(data->sensor_);
-
- ipa::RPi::IPAInitResult result;
- if (data->loadIPA(&result)) {
- LOG(RPI, Error) << "Failed to load a suitable IPA library";
- return -EINVAL;
- }
-
- if (result.sensorConfig.sensorMetadata ^ !!unicamEmbedded) {
- LOG(RPI, Warning) << "Mismatch between Unicam and CamHelper for embedded data usage!";
- result.sensorConfig.sensorMetadata = false;
- if (unicamEmbedded)
- data->unicam_[Unicam::Embedded].dev()->bufferReady.disconnect();
- }
-
- /*
- * Open all Unicam and ISP streams. The exception is the embedded data
- * stream, which only gets opened below if the IPA reports that the sensor
- * supports embedded data.
- *
- * The below grouping is just for convenience so that we can easily
- * iterate over all streams in one go.
- */
- data->streams_.push_back(&data->unicam_[Unicam::Image]);
- if (result.sensorConfig.sensorMetadata)
- data->streams_.push_back(&data->unicam_[Unicam::Embedded]);
-
- for (auto &stream : data->isp_)
- data->streams_.push_back(&stream);
-
- for (auto stream : data->streams_) {
- int ret = stream->dev()->open();
- if (ret)
- return ret;
- }
-
- if (!data->unicam_[Unicam::Image].dev()->caps().hasMediaController()) {
- LOG(RPI, Error) << "Unicam driver does not use the MediaController, please update your kernel!";
- return -EINVAL;
- }
-
- /*
- * Setup our delayed control writer with the sensor default
- * gain and exposure delays. Mark VBLANK for priority write.
- */
- std::unordered_map<uint32_t, DelayedControls::ControlParams> params = {
- { V4L2_CID_ANALOGUE_GAIN, { result.sensorConfig.gainDelay, false } },
- { V4L2_CID_EXPOSURE, { result.sensorConfig.exposureDelay, false } },
- { V4L2_CID_VBLANK, { result.sensorConfig.vblankDelay, true } }
- };
- data->delayedCtrls_ = std::make_unique<DelayedControls>(data->sensor_->device(), params);
- data->sensorMetadata_ = result.sensorConfig.sensorMetadata;
-
- /* Register initial controls that the Raspberry Pi IPA can handle. */
- data->controlInfo_ = std::move(result.controlInfo);
-
- /* Initialize the camera properties. */
- data->properties_ = data->sensor_->properties();
-
- /*
- * The V4L2_CID_NOTIFY_GAINS control, if present, is used to inform the
- * sensor of the colour gains. It is defined to be a linear gain where
- * the default value represents a gain of exactly one.
- */
- auto it = data->sensor_->controls().find(V4L2_CID_NOTIFY_GAINS);
- if (it != data->sensor_->controls().end())
- data->notifyGainsUnity_ = it->second.def().get<int32_t>();
-
- /*
- * Set a default value for the ScalerCropMaximum property to show
- * that we support its use, however, initialise it to zero because
- * it's not meaningful until a camera mode has been chosen.
- */
- data->properties_.set(properties::ScalerCropMaximum, Rectangle{});
-
- /*
- * We cache three things about the sensor in relation to transforms
- * (meaning horizontal and vertical flips).
- *
- * Firstly, does it support them?
- * Secondly, if you use them does it affect the Bayer ordering?
- * Thirdly, what is the "native" Bayer order, when no transforms are
- * applied?
- *
- * We note that the sensor's cached list of supported formats is
- * already in the "native" order, with any flips having been undone.
- */
- const V4L2Subdevice *sensor = data->sensor_->device();
- const struct v4l2_query_ext_ctrl *hflipCtrl = sensor->controlInfo(V4L2_CID_HFLIP);
- if (hflipCtrl) {
- /* We assume it will support vflips too... */
- data->supportsFlips_ = true;
- data->flipsAlterBayerOrder_ = hflipCtrl->flags & V4L2_CTRL_FLAG_MODIFY_LAYOUT;
- }
-
- /* Look for a valid Bayer format. */
- BayerFormat bayerFormat;
- for (const auto &iter : data->sensorFormats_) {
- bayerFormat = BayerFormat::fromMbusCode(iter.first);
- if (bayerFormat.isValid())
- break;
- }
-
- if (!bayerFormat.isValid()) {
- LOG(RPI, Error) << "No Bayer format found";
- return -EINVAL;
- }
- data->nativeBayerOrder_ = bayerFormat.order;
-
- /*
- * List the available streams an application may request. At present, we
- * do not advertise Unicam Embedded and ISP Statistics streams, as there
- * is no mechanism for the application to request non-image buffer formats.
- */
- std::set<Stream *> streams;
- streams.insert(&data->unicam_[Unicam::Image]);
- streams.insert(&data->isp_[Isp::Output0]);
- streams.insert(&data->isp_[Isp::Output1]);
-
- /* Create and register the camera. */
- const std::string &id = data->sensor_->id();
- std::shared_ptr<Camera> camera =
- Camera::create(std::move(data), id, streams);
- PipelineHandler::registerCamera(std::move(camera));
-
- LOG(RPI, Info) << "Registered camera " << id
- << " to Unicam device " << unicam->deviceNode()
- << " and ISP device " << isp->deviceNode();
- return 0;
-}
-
-int PipelineHandlerRPi::queueAllBuffers(Camera *camera)
-{
- RPiCameraData *data = cameraData(camera);
- int ret;
-
- for (auto const stream : data->streams_) {
- if (!stream->isExternal()) {
- ret = stream->queueAllBuffers();
- if (ret < 0)
- return ret;
- } else {
- /*
- * For external streams, we must queue up a set of internal
- * buffers to handle the number of drop frames requested by
- * the IPA. This is done by passing nullptr in queueBuffer().
- *
- * The below queueBuffer() call will do nothing if there
- * are not enough internal buffers allocated, but this will
- * be handled by queuing the request for buffers in the
- * RPiStream object.
- */
- unsigned int i;
- for (i = 0; i < data->dropFrameCount_; i++) {
- ret = stream->queueBuffer(nullptr);
- if (ret)
- return ret;
- }
- }
- }
-
- return 0;
-}
-
-int PipelineHandlerRPi::prepareBuffers(Camera *camera)
-{
- RPiCameraData *data = cameraData(camera);
- unsigned int numRawBuffers = 0;
- int ret;
-
- for (Stream *s : camera->streams()) {
- if (isRaw(s->configuration().pixelFormat)) {
- numRawBuffers = s->configuration().bufferCount;
- break;
- }
- }
-
- /* Decide how many internal buffers to allocate. */
- for (auto const stream : data->streams_) {
- unsigned int numBuffers;
- /*
- * For Unicam, allocate a minimum of 4 buffers as we want
- * to avoid any frame drops.
- */
- constexpr unsigned int minBuffers = 4;
- if (stream == &data->unicam_[Unicam::Image]) {
- /*
- * If an application has configured a RAW stream, allocate
- * additional buffers to make up the minimum, but ensure
- * we have at least 2 sets of internal buffers to use to
- * minimise frame drops.
- */
- numBuffers = std::max<int>(2, minBuffers - numRawBuffers);
- } else if (stream == &data->isp_[Isp::Input]) {
- /*
- * ISP input buffers are imported from Unicam, so follow
- * similar logic as above to count all the RAW buffers
- * available.
- */
- numBuffers = numRawBuffers + std::max<int>(2, minBuffers - numRawBuffers);
-
- } else if (stream == &data->unicam_[Unicam::Embedded]) {
- /*
- * Embedded data buffers are (currently) for internal use,
- * so allocate the minimum required to avoid frame drops.
- */
- numBuffers = minBuffers;
- } else {
- /*
- * Since the ISP runs synchronous with the IPA and requests,
- * we only ever need one set of internal buffers. Any buffers
- * the application wants to hold onto will already be exported
- * through PipelineHandlerRPi::exportFrameBuffers().
- */
- numBuffers = 1;
- }
-
- ret = stream->prepareBuffers(numBuffers);
- if (ret < 0)
- return ret;
- }
-
- /*
- * Pass the stats and embedded data buffers to the IPA. No other
- * buffers need to be passed.
- */
- mapBuffers(camera, data->isp_[Isp::Stats].getBuffers(), ipa::RPi::MaskStats);
- if (data->sensorMetadata_)
- mapBuffers(camera, data->unicam_[Unicam::Embedded].getBuffers(),
- ipa::RPi::MaskEmbeddedData);
-
- return 0;
-}
-
-void PipelineHandlerRPi::mapBuffers(Camera *camera, const RPi::BufferMap &buffers, unsigned int mask)
-{
- RPiCameraData *data = cameraData(camera);
- std::vector<IPABuffer> ipaBuffers;
- /*
- * Link the FrameBuffers with the id (key value) in the map stored in
- * the RPi stream object - along with an identifier mask.
- *
- * This will allow us to identify buffers passed between the pipeline
- * handler and the IPA.
- */
- for (auto const &it : buffers) {
- ipaBuffers.push_back(IPABuffer(mask | it.first,
- it.second->planes()));
- data->ipaBuffers_.insert(mask | it.first);
- }
-
- data->ipa_->mapBuffers(ipaBuffers);
-}
-
-void RPiCameraData::freeBuffers()
-{
- if (ipa_) {
- /*
- * Copy the buffer ids from the unordered_set to a vector to
- * pass to the IPA.
- */
- std::vector<unsigned int> ipaBuffers(ipaBuffers_.begin(),
- ipaBuffers_.end());
- ipa_->unmapBuffers(ipaBuffers);
- ipaBuffers_.clear();
- }
-
- for (auto const stream : streams_)
- stream->releaseBuffers();
-
- buffersAllocated_ = false;
-}
-
-void RPiCameraData::frameStarted(uint32_t sequence)
-{
- LOG(RPI, Debug) << "frame start " << sequence;
-
- /* Write any controls for the next frame as soon as we can. */
- delayedCtrls_->applyControls(sequence);
-}
-
-int RPiCameraData::loadIPA(ipa::RPi::IPAInitResult *result)
-{
- ipa_ = IPAManager::createIPA<ipa::RPi::IPAProxyRPi>(pipe(), 1, 1);
-
- if (!ipa_)
- return -ENOENT;
-
- ipa_->statsMetadataComplete.connect(this, &RPiCameraData::statsMetadataComplete);
- ipa_->runIsp.connect(this, &RPiCameraData::runIsp);
- ipa_->embeddedComplete.connect(this, &RPiCameraData::embeddedComplete);
- ipa_->setIspControls.connect(this, &RPiCameraData::setIspControls);
- ipa_->setDelayedControls.connect(this, &RPiCameraData::setDelayedControls);
-
- /*
- * The configuration (tuning file) is made from the sensor name unless
- * the environment variable overrides it.
- */
- std::string configurationFile;
- char const *configFromEnv = utils::secure_getenv("LIBCAMERA_RPI_TUNING_FILE");
- if (!configFromEnv || *configFromEnv == '\0')
- configurationFile = ipa_->configurationFile(sensor_->model() + ".json");
- else
- configurationFile = std::string(configFromEnv);
-
- IPASettings settings(configurationFile, sensor_->model());
-
- return ipa_->init(settings, result);
-}
-
-int RPiCameraData::configureIPA(const CameraConfiguration *config, ipa::RPi::IPAConfigResult *result)
-{
- std::map<unsigned int, IPAStream> streamConfig;
- std::map<unsigned int, ControlInfoMap> entityControls;
- ipa::RPi::IPAConfig ipaConfig;
-
- /* Inform IPA of stream configuration and sensor controls. */
- unsigned int i = 0;
- for (auto const &stream : isp_) {
- if (stream.isExternal()) {
- streamConfig[i++] = IPAStream(
- stream.configuration().pixelFormat,
- stream.configuration().size);
- }
- }
-
- entityControls.emplace(0, sensor_->controls());
- entityControls.emplace(1, isp_[Isp::Input].dev()->controls());
-
- /* Always send the user transform to the IPA. */
- ipaConfig.transform = static_cast<unsigned int>(config->transform);
-
- /* Allocate the lens shading table via dmaHeap and pass to the IPA. */
- if (!lsTable_.isValid()) {
- lsTable_ = SharedFD(dmaHeap_.alloc("ls_grid", ipa::RPi::MaxLsGridSize));
- if (!lsTable_.isValid())
- return -ENOMEM;
-
- /* Allow the IPA to mmap the LS table via the file descriptor. */
- /*
- * \todo Investigate if mapping the lens shading table buffer
- * could be handled with mapBuffers().
- */
- ipaConfig.lsTableHandle = lsTable_;
- }
-
- /* We store the IPACameraSensorInfo for digital zoom calculations. */
- int ret = sensor_->sensorInfo(&sensorInfo_);
- if (ret) {
- LOG(RPI, Error) << "Failed to retrieve camera sensor info";
- return ret;
- }
-
- /* Ready the IPA - it must know about the sensor resolution. */
- ControlList controls;
- ret = ipa_->configure(sensorInfo_, streamConfig, entityControls, ipaConfig,
- &controls, result);
- if (ret < 0) {
- LOG(RPI, Error) << "IPA configuration failed!";
- return -EPIPE;
- }
-
- if (!controls.empty())
- setSensorControls(controls);
-
- return 0;
-}
-
-/*
- * enumerateVideoDevices() iterates over the Media Controller topology, starting
- * at the sensor and finishing at Unicam. For each sensor, RPiCameraData stores
- * a unique list of any intermediate video mux or bridge devices connected in a
- * cascade, together with the entity to entity link.
- *
- * Entity pad configuration and link enabling happen at the end of configure().
- * We first disable all pad links on each entity device in the chain, and then
- * selectively enable the specific links needed to connect the sensor to Unicam
- * across all intermediate muxes and bridges.
- *
- * In the cascaded topology below, if Sensor1 is used, the Mux2 -> Mux1 link
- * will be disabled, and Sensor1 -> Mux1 -> Unicam links enabled. Alternatively,
- * if Sensor3 is used, the Sensor2 -> Mux2 and Sensor1 -> Mux1 links are disabled,
- * and Sensor3 -> Mux2 -> Mux1 -> Unicam links are enabled. All other links will
- * remain unchanged.
- *
- * +----------+
- * | Unicam |
- * +-----^----+
- * |
- * +---+---+
- * | Mux1 <-------+
- * +--^----+ |
- * | |
- * +-----+---+ +---+---+
- * | Sensor1 | | Mux2 |<--+
- * +---------+ +-^-----+ |
- * | |
- * +-------+-+ +---+-----+
- * | Sensor2 | | Sensor3 |
- * +---------+ +---------+
- */
-void RPiCameraData::enumerateVideoDevices(MediaLink *link)
-{
- const MediaPad *sinkPad = link->sink();
- const MediaEntity *entity = sinkPad->entity();
- bool unicamFound = false;
-
- /* We only deal with Video Mux and Bridge devices in cascade. */
- if (entity->function() != MEDIA_ENT_F_VID_MUX &&
- entity->function() != MEDIA_ENT_F_VID_IF_BRIDGE)
- return;
-
- /* Find the source pad for this Video Mux or Bridge device. */
- const MediaPad *sourcePad = nullptr;
- for (const MediaPad *pad : entity->pads()) {
- if (pad->flags() & MEDIA_PAD_FL_SOURCE) {
- /*
- * We can only deal with devices that have a single source
- * pad. If this device has multiple source pads, ignore it
- * and this branch in the cascade.
- */
- if (sourcePad)
- return;
-
- sourcePad = pad;
- }
- }
-
- LOG(RPI, Debug) << "Found video mux device " << entity->name()
- << " linked to sink pad " << sinkPad->index();
-
- bridgeDevices_.emplace_back(std::make_unique<V4L2Subdevice>(entity), link);
- bridgeDevices_.back().first->open();
-
- /*
- * Iterate through all the sink pad links down the cascade to find any
- * other Video Mux and Bridge devices.
- */
- for (MediaLink *l : sourcePad->links()) {
- enumerateVideoDevices(l);
- /* Once we reach the Unicam entity, we are done. */
- if (l->sink()->entity()->name() == "unicam-image") {
- unicamFound = true;
- break;
- }
- }
-
- /* This identifies the end of our entity enumeration recursion. */
- if (link->source()->entity()->function() == MEDIA_ENT_F_CAM_SENSOR) {
- /*
- * If Unicam is not at the end of this cascade, we cannot configure
- * this topology automatically, so remove all entity references.
- */
- if (!unicamFound) {
- LOG(RPI, Warning) << "Cannot automatically configure this MC topology!";
- bridgeDevices_.clear();
- }
- }
-}
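
The link handling described in the comment block above lives in configure(), but the idea reduces to: for every recorded mux or bridge, disable all of its sink-pad links, then re-enable only the link on the active sensor branch. A rough sketch under those assumptions, using the libcamera media-object API with error handling omitted (not the actual implementation):

#include <utility>
#include <vector>

#include <linux/media.h>

#include "libcamera/internal/media_object.h"
#include "libcamera/internal/v4l2_subdevice.h"

using namespace libcamera;

/* Sketch only: bridges mirrors bridgeDevices_, pairing each opened subdevice
 * with the link that feeds it on the selected sensor branch. */
void setupBridgeLinks(std::vector<std::pair<V4L2Subdevice *, MediaLink *>> &bridges)
{
	for (auto &[subdev, activeLink] : bridges) {
		for (const MediaPad *pad : subdev->entity()->pads()) {
			if (!(pad->flags() & MEDIA_PAD_FL_SINK))
				continue;

			for (MediaLink *link : pad->links())
				link->setEnabled(false);
		}

		/* Re-enable only the link towards the active sensor. */
		activeLink->setEnabled(true);
	}
}
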
-
-void RPiCameraData::statsMetadataComplete(uint32_t bufferId, const ControlList &controls)
-{
- if (state_ == State::Stopped)
- return;
-
- FrameBuffer *buffer = isp_[Isp::Stats].getBuffers().at(bufferId);
-
- handleStreamBuffer(buffer, &isp_[Isp::Stats]);
-
-	/* Add what the IPA has provided to the Request metadata. */
- Request *request = requestQueue_.front();
- request->metadata().merge(controls);
-
- /*
- * Inform the sensor of the latest colour gains if it has the
- * V4L2_CID_NOTIFY_GAINS control (which means notifyGainsUnity_ is set).
- */
- if (notifyGainsUnity_ && controls.contains(libcamera::controls::ColourGains)) {
- libcamera::Span<const float> colourGains = controls.get(libcamera::controls::ColourGains);
- /* The control wants linear gains in the order B, Gb, Gr, R. */
- ControlList ctrls(sensor_->controls());
- std::array<int32_t, 4> gains{
- static_cast<int32_t>(colourGains[1] * *notifyGainsUnity_),
- *notifyGainsUnity_,
- *notifyGainsUnity_,
- static_cast<int32_t>(colourGains[0] * *notifyGainsUnity_)
- };
- ctrls.set(V4L2_CID_NOTIFY_GAINS, Span<const int32_t>{ gains });
-
- sensor_->setControls(&ctrls);
- }
-
- state_ = State::IpaComplete;
- handleState();
-}
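
To make the gain ordering concrete, a tiny self-contained check with made-up numbers (unity gain 128, red gain 2.0, blue gain 1.5) yields { 192, 128, 128, 256 } in the B, Gb, Gr, R order the control expects:

#include <array>
#include <cassert>
#include <cstdint>

int main()
{
	/* Made-up values: driver unity gain and IPA-provided red/blue gains. */
	const int32_t unity = 128;
	const float colourGains[2] = { 2.0f /* red */, 1.5f /* blue */ };

	/* V4L2_CID_NOTIFY_GAINS wants linear gains in the order B, Gb, Gr, R. */
	const std::array<int32_t, 4> gains{
		static_cast<int32_t>(colourGains[1] * unity),
		unity,
		unity,
		static_cast<int32_t>(colourGains[0] * unity),
	};

	assert((gains == std::array<int32_t, 4>{ 192, 128, 128, 256 }));
	return 0;
}
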
-
-void RPiCameraData::runIsp(uint32_t bufferId)
-{
- if (state_ == State::Stopped)
- return;
-
- FrameBuffer *buffer = unicam_[Unicam::Image].getBuffers().at(bufferId);
-
- LOG(RPI, Debug) << "Input re-queue to ISP, buffer id " << bufferId
- << ", timestamp: " << buffer->metadata().timestamp;
-
- isp_[Isp::Input].queueBuffer(buffer);
- ispOutputCount_ = 0;
- handleState();
-}
-
-void RPiCameraData::embeddedComplete(uint32_t bufferId)
-{
- if (state_ == State::Stopped)
- return;
-
- FrameBuffer *buffer = unicam_[Unicam::Embedded].getBuffers().at(bufferId);
- handleStreamBuffer(buffer, &unicam_[Unicam::Embedded]);
- handleState();
-}
-
-void RPiCameraData::setIspControls(const ControlList &controls)
-{
- ControlList ctrls = controls;
-
- if (ctrls.contains(V4L2_CID_USER_BCM2835_ISP_LENS_SHADING)) {
- ControlValue &value =
- const_cast<ControlValue &>(ctrls.get(V4L2_CID_USER_BCM2835_ISP_LENS_SHADING));
- Span<uint8_t> s = value.data();
- bcm2835_isp_lens_shading *ls =
- reinterpret_cast<bcm2835_isp_lens_shading *>(s.data());
- ls->dmabuf = lsTable_.get();
- }
-
- isp_[Isp::Input].dev()->setControls(&ctrls);
- handleState();
-}
-
-void RPiCameraData::setDelayedControls(const ControlList &controls)
-{
- if (!delayedCtrls_->push(controls))
- LOG(RPI, Error) << "V4L2 DelayedControl set failed";
- handleState();
-}
-
-void RPiCameraData::setSensorControls(ControlList &controls)
-{
- /*
-	 * If both VBLANK and EXPOSURE are present, VBLANK must be written
-	 * ahead of, and separately from, EXPOSURE to avoid V4L2 rejecting the
-	 * latter. This is identical to what DelayedControls does with the
-	 * priority write flag.
-	 *
-	 * As a consequence of the logic below, VBLANK gets set twice, and we
-	 * rely on the V4L2 framework not to pass the second control set to the
-	 * driver as the actual control value has not changed.
- */
- if (controls.contains(V4L2_CID_EXPOSURE) && controls.contains(V4L2_CID_VBLANK)) {
- ControlList vblank_ctrl;
-
- vblank_ctrl.set(V4L2_CID_VBLANK, controls.get(V4L2_CID_VBLANK));
- sensor_->setControls(&vblank_ctrl);
- }
-
- sensor_->setControls(&controls);
-}
-
-void RPiCameraData::unicamTimeout()
-{
- LOG(RPI, Error) << "Unicam has timed out!";
- LOG(RPI, Error) << "Please check that your camera sensor connector is attached securely.";
- LOG(RPI, Error) << "Alternatively, try another cable and/or sensor.";
-}
-
-void RPiCameraData::unicamBufferDequeue(FrameBuffer *buffer)
-{
- RPi::Stream *stream = nullptr;
- int index;
-
- if (state_ == State::Stopped)
- return;
-
- for (RPi::Stream &s : unicam_) {
- index = s.getBufferId(buffer);
- if (index != -1) {
- stream = &s;
- break;
- }
- }
-
- /* The buffer must belong to one of our streams. */
- ASSERT(stream);
-
- LOG(RPI, Debug) << "Stream " << stream->name() << " buffer dequeue"
- << ", buffer id " << index
- << ", timestamp: " << buffer->metadata().timestamp;
-
- if (stream == &unicam_[Unicam::Image]) {
- /*
-		 * Look up the sensor controls used for this frame sequence from
-		 * DelayedControls and queue them along with the frame buffer.
- */
- ControlList ctrl = delayedCtrls_->get(buffer->metadata().sequence);
- /*
- * Add the frame timestamp to the ControlList for the IPA to use
- * as it does not receive the FrameBuffer object.
- */
- ctrl.set(controls::SensorTimestamp, buffer->metadata().timestamp);
- bayerQueue_.push({ buffer, std::move(ctrl) });
- } else {
- embeddedQueue_.push(buffer);
- }
-
- handleState();
-}
-
-void RPiCameraData::ispInputDequeue(FrameBuffer *buffer)
-{
- if (state_ == State::Stopped)
- return;
-
- LOG(RPI, Debug) << "Stream ISP Input buffer complete"
- << ", buffer id " << unicam_[Unicam::Image].getBufferId(buffer)
- << ", timestamp: " << buffer->metadata().timestamp;
-
- /* The ISP input buffer gets re-queued into Unicam. */
- handleStreamBuffer(buffer, &unicam_[Unicam::Image]);
- handleState();
-}
-
-void RPiCameraData::ispOutputDequeue(FrameBuffer *buffer)
-{
- RPi::Stream *stream = nullptr;
- int index;
-
- if (state_ == State::Stopped)
- return;
-
- for (RPi::Stream &s : isp_) {
- index = s.getBufferId(buffer);
- if (index != -1) {
- stream = &s;
- break;
- }
- }
-
- /* The buffer must belong to one of our ISP output streams. */
- ASSERT(stream);
-
- LOG(RPI, Debug) << "Stream " << stream->name() << " buffer complete"
- << ", buffer id " << index
- << ", timestamp: " << buffer->metadata().timestamp;
-
- /*
-	 * The ISP statistics buffer must not be re-queued or sent back to the
-	 * application until the IPA signals that it has finished with it.
- */
- if (stream == &isp_[Isp::Stats]) {
- ipa_->signalStatReady(ipa::RPi::MaskStats | static_cast<unsigned int>(index));
- } else {
- /* Any other ISP output can be handed back to the application now. */
- handleStreamBuffer(buffer, stream);
- }
-
- /*
- * Increment the number of ISP outputs generated.
- * This is needed to track dropped frames.
- */
- ispOutputCount_++;
-
- handleState();
-}
-
-void RPiCameraData::clearIncompleteRequests()
-{
- /*
- * All outstanding requests (and associated buffers) must be returned
- * back to the application.
- */
- while (!requestQueue_.empty()) {
- Request *request = requestQueue_.front();
-
- for (auto &b : request->buffers()) {
- FrameBuffer *buffer = b.second;
- /*
- * Has the buffer already been handed back to the
- * request? If not, do so now.
- */
- if (buffer->request()) {
- buffer->_d()->cancel();
- pipe()->completeBuffer(request, buffer);
- }
- }
-
- pipe()->completeRequest(request);
- requestQueue_.pop_front();
- }
-}
-
-void RPiCameraData::handleStreamBuffer(FrameBuffer *buffer, RPi::Stream *stream)
-{
- /*
-	 * It is possible to be here without a pending request, so check
-	 * that we actually have one to action; otherwise we just return the
-	 * buffer back to the stream.
- */
- Request *request = requestQueue_.empty() ? nullptr : requestQueue_.front();
- if (!dropFrameCount_ && request && request->findBuffer(stream) == buffer) {
- /*
- * Check if this is an externally provided buffer, and if
- * so, we must stop tracking it in the pipeline handler.
- */
- handleExternalBuffer(buffer, stream);
- /*
- * Tag the buffer as completed, returning it to the
- * application.
- */
- pipe()->completeBuffer(request, buffer);
- } else {
- /*
- * This buffer was not part of the Request (which happens if an
- * internal buffer was used for an external stream, or
- * unconditionally for internal streams), or there is no pending
- * request, so we can recycle it.
- */
- stream->returnBuffer(buffer);
- }
-}
-
-void RPiCameraData::handleExternalBuffer(FrameBuffer *buffer, RPi::Stream *stream)
-{
- unsigned int id = stream->getBufferId(buffer);
-
- if (!(id & ipa::RPi::MaskExternalBuffer))
- return;
-
- /* Stop the Stream object from tracking the buffer. */
- stream->removeExternalBuffer(buffer);
-}
-
-void RPiCameraData::handleState()
-{
- switch (state_) {
- case State::Stopped:
- case State::Busy:
- break;
-
- case State::IpaComplete:
- /* If the request is completed, we will switch to Idle state. */
- checkRequestCompleted();
- /*
- * No break here, we want to try running the pipeline again.
- * The fallthrough clause below suppresses compiler warnings.
- */
- [[fallthrough]];
-
- case State::Idle:
- tryRunPipeline();
- break;
- }
-}
-
-void RPiCameraData::checkRequestCompleted()
-{
- bool requestCompleted = false;
- /*
- * If we are dropping this frame, do not touch the request, simply
- * change the state to IDLE when ready.
- */
- if (!dropFrameCount_) {
- Request *request = requestQueue_.front();
- if (request->hasPendingBuffers())
- return;
-
- /* Must wait for metadata to be filled in before completing. */
- if (state_ != State::IpaComplete)
- return;
-
- pipe()->completeRequest(request);
- requestQueue_.pop_front();
- requestCompleted = true;
- }
-
- /*
- * Make sure we have three outputs completed in the case of a dropped
- * frame.
- */
- if (state_ == State::IpaComplete &&
- ((ispOutputCount_ == 3 && dropFrameCount_) || requestCompleted)) {
- state_ = State::Idle;
- if (dropFrameCount_) {
- dropFrameCount_--;
- LOG(RPI, Debug) << "Dropping frame at the request of the IPA ("
- << dropFrameCount_ << " left)";
- }
- }
-}
-
-Rectangle RPiCameraData::scaleIspCrop(const Rectangle &ispCrop) const
-{
- /*
- * Scale a crop rectangle defined in the ISP's coordinates into native sensor
- * coordinates.
- */
- Rectangle nativeCrop = ispCrop.scaledBy(sensorInfo_.analogCrop.size(),
- sensorInfo_.outputSize);
- nativeCrop.translateBy(sensorInfo_.analogCrop.topLeft());
- return nativeCrop;
-}
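
A quick numeric check of that mapping, with made-up mode geometry (a 2x downscale from a 4056x3040 analogue crop at the origin to a 2028x1520 ISP output), can be written directly against libcamera's geometry helpers:

#include <cassert>

#include <libcamera/geometry.h>

using namespace libcamera;

int main()
{
	/* Hypothetical camera mode: full-frame analogue crop, 2x downscale. */
	const Rectangle analogCrop{ 0, 0, 4056, 3040 };
	const Size outputSize{ 2028, 1520 };

	/* An ISP crop covering the centre quarter of the output. */
	Rectangle ispCrop{ 507, 380, 1014, 760 };

	Rectangle native = ispCrop.scaledBy(analogCrop.size(), outputSize)
				  .translatedBy(analogCrop.topLeft());

	assert(native == Rectangle(1014, 760, 2028, 1520));
	return 0;
}
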
-
-void RPiCameraData::applyScalerCrop(const ControlList &controls)
-{
- if (controls.contains(controls::ScalerCrop)) {
- Rectangle nativeCrop = controls.get<Rectangle>(controls::ScalerCrop);
-
- if (!nativeCrop.width || !nativeCrop.height)
- nativeCrop = { 0, 0, 1, 1 };
-
- /* Create a version of the crop scaled to ISP (camera mode) pixels. */
- Rectangle ispCrop = nativeCrop.translatedBy(-sensorInfo_.analogCrop.topLeft());
- ispCrop.scaleBy(sensorInfo_.outputSize, sensorInfo_.analogCrop.size());
-
- /*
- * The crop that we set must be:
- * 1. At least as big as ispMinCropSize_, once that's been
- * enlarged to the same aspect ratio.
- * 2. With the same mid-point, if possible.
- * 3. But it can't go outside the sensor area.
- */
- Size minSize = ispMinCropSize_.expandedToAspectRatio(nativeCrop.size());
- Size size = ispCrop.size().expandedTo(minSize);
- ispCrop = size.centeredTo(ispCrop.center()).enclosedIn(Rectangle(sensorInfo_.outputSize));
-
- if (ispCrop != ispCrop_) {
- isp_[Isp::Input].dev()->setSelection(V4L2_SEL_TGT_CROP, &ispCrop);
- ispCrop_ = ispCrop;
-
- /*
- * Also update the ScalerCrop in the metadata with what we actually
- * used. But we must first rescale that from ISP (camera mode) pixels
- * back into sensor native pixels.
- */
- scalerCrop_ = scaleIspCrop(ispCrop_);
- }
- }
-}
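
On the application side, the digital zoom handled above is driven simply by attaching a ScalerCrop rectangle, expressed in sensor native coordinates, to each request. A hedged sketch (the crop values are hypothetical and error handling is omitted):

#include <libcamera/libcamera.h>

/* Sketch only: request the centre crop of a hypothetical 4056x3040 sensor,
 * i.e. a 2x digital zoom. */
void requestCentreZoom(libcamera::Request *request)
{
	using namespace libcamera;

	const Rectangle crop{ 1014, 760, 2028, 1520 };
	request->controls().set(controls::ScalerCrop, crop);
}
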
-
-void RPiCameraData::fillRequestMetadata(const ControlList &bufferControls,
- Request *request)
-{
- request->metadata().set(controls::SensorTimestamp,
- bufferControls.get(controls::SensorTimestamp));
-
- request->metadata().set(controls::ScalerCrop, scalerCrop_);
-}
-
-void RPiCameraData::tryRunPipeline()
-{
- FrameBuffer *embeddedBuffer;
- BayerFrame bayerFrame;
-
- /* If any of our request or buffer queues are empty, we cannot proceed. */
- if (state_ != State::Idle || requestQueue_.empty() ||
- bayerQueue_.empty() || (embeddedQueue_.empty() && sensorMetadata_))
- return;
-
- if (!findMatchingBuffers(bayerFrame, embeddedBuffer))
- return;
-
- /* Take the first request from the queue and action the IPA. */
- Request *request = requestQueue_.front();
-
- /* See if a new ScalerCrop value needs to be applied. */
- applyScalerCrop(request->controls());
-
- /*
- * Clear the request metadata and fill it with some initial non-IPA
- * related controls. We clear it first because the request metadata
- * may have been populated if we have dropped the previous frame.
- */
- request->metadata().clear();
- fillRequestMetadata(bayerFrame.controls, request);
-
- /*
-	 * Have the IPA process all the user controls. Once this is complete,
-	 * we queue the ISP output buffer listed in the request to start the HW
-	 * pipeline.
- */
- ipa_->signalQueueRequest(request->controls());
-
- /* Set our state to say the pipeline is active. */
- state_ = State::Busy;
-
- unsigned int bayerId = unicam_[Unicam::Image].getBufferId(bayerFrame.buffer);
-
- LOG(RPI, Debug) << "Signalling signalIspPrepare:"
- << " Bayer buffer id: " << bayerId;
-
- ipa::RPi::ISPConfig ispPrepare;
- ispPrepare.bayerBufferId = ipa::RPi::MaskBayerData | bayerId;
- ispPrepare.controls = std::move(bayerFrame.controls);
-
- if (embeddedBuffer) {
- unsigned int embeddedId = unicam_[Unicam::Embedded].getBufferId(embeddedBuffer);
-
- ispPrepare.embeddedBufferId = ipa::RPi::MaskEmbeddedData | embeddedId;
- ispPrepare.embeddedBufferPresent = true;
-
- LOG(RPI, Debug) << "Signalling signalIspPrepare:"
- << " Embedded buffer id: " << embeddedId;
- }
-
- ipa_->signalIspPrepare(ispPrepare);
-}
-
-bool RPiCameraData::findMatchingBuffers(BayerFrame &bayerFrame, FrameBuffer *&embeddedBuffer)
-{
- if (bayerQueue_.empty())
- return false;
-
- /* Start with the front of the bayer queue. */
- bayerFrame = std::move(bayerQueue_.front());
- bayerQueue_.pop();
-
- /*
- * Find the embedded data buffer with a matching timestamp to pass to
-	 * the IPA. Any embedded buffers with a timestamp lower than that of
-	 * the current bayer buffer will be removed and re-queued to the driver.
- */
- uint64_t ts = bayerFrame.buffer->metadata().timestamp;
- embeddedBuffer = nullptr;
- while (!embeddedQueue_.empty()) {
- FrameBuffer *b = embeddedQueue_.front();
- if (b->metadata().timestamp < ts) {
- embeddedQueue_.pop();
- unicam_[Unicam::Embedded].returnBuffer(b);
- LOG(RPI, Debug) << "Dropping unmatched input frame in stream "
- << unicam_[Unicam::Embedded].name();
- } else if (b->metadata().timestamp == ts) {
- /* Found a match! */
- embeddedBuffer = b;
- embeddedQueue_.pop();
- break;
- } else {
- break; /* Only higher timestamps from here. */
- }
- }
-
- if (!embeddedBuffer && sensorMetadata_) {
- /* Log if there is no matching embedded data buffer found. */
- LOG(RPI, Debug) << "Returning bayer frame without a matching embedded buffer.";
- }
-
- return true;
-}
-
-REGISTER_PIPELINE_HANDLER(PipelineHandlerRPi)
-
-} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/raspberrypi/rpi_stream.h b/src/libcamera/pipeline/raspberrypi/rpi_stream.h
deleted file mode 100644
index fe011100..00000000
--- a/src/libcamera/pipeline/raspberrypi/rpi_stream.h
+++ /dev/null
@@ -1,178 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Ltd.
- *
- * rpi_stream.h - Raspberry Pi device stream abstraction class.
- */
-
-#pragma once
-
-#include <queue>
-#include <string>
-#include <unordered_map>
-#include <vector>
-
-#include <libcamera/ipa/raspberrypi_ipa_interface.h>
-#include <libcamera/stream.h>
-
-#include "libcamera/internal/v4l2_videodevice.h"
-
-namespace libcamera {
-
-namespace RPi {
-
-using BufferMap = std::unordered_map<unsigned int, FrameBuffer *>;
-
-/*
- * Device stream abstraction for either an internal or external stream.
- * Used for both Unicam and the ISP.
- */
-class Stream : public libcamera::Stream
-{
-public:
- Stream()
- : id_(ipa::RPi::MaskID)
- {
- }
-
- Stream(const char *name, MediaEntity *dev, bool importOnly = false)
- : external_(false), importOnly_(importOnly), name_(name),
- dev_(std::make_unique<V4L2VideoDevice>(dev)), id_(ipa::RPi::MaskID)
- {
- }
-
- V4L2VideoDevice *dev() const;
- std::string name() const;
- bool isImporter() const;
- void resetBuffers();
-
- void setExternal(bool external);
- bool isExternal() const;
-
- void setExportedBuffers(std::vector<std::unique_ptr<FrameBuffer>> *buffers);
- const BufferMap &getBuffers() const;
- int getBufferId(FrameBuffer *buffer) const;
-
- void setExternalBuffer(FrameBuffer *buffer);
- void removeExternalBuffer(FrameBuffer *buffer);
-
- int prepareBuffers(unsigned int count);
- int queueBuffer(FrameBuffer *buffer);
- void returnBuffer(FrameBuffer *buffer);
-
- int queueAllBuffers();
- void releaseBuffers();
-
-private:
- class IdGenerator
- {
- public:
- IdGenerator(int max)
- : max_(max), id_(0)
- {
- }
-
- int get()
- {
- int id;
- if (!recycle_.empty()) {
- id = recycle_.front();
- recycle_.pop();
- } else {
- id = id_++;
- ASSERT(id_ <= max_);
- }
- return id;
- }
-
- void release(int id)
- {
- recycle_.push(id);
- }
-
- void reset()
- {
- id_ = 0;
- recycle_ = {};
- }
-
- private:
- int max_;
- int id_;
- std::queue<int> recycle_;
- };
-
- void clearBuffers();
- int queueToDevice(FrameBuffer *buffer);
-
- /*
- * Indicates that this stream is active externally, i.e. the buffers
- * might be provided by (and returned to) the application.
- */
- bool external_;
-
- /* Indicates that this stream only imports buffers, e.g. ISP input. */
- bool importOnly_;
-
- /* Stream name identifier. */
- std::string name_;
-
- /* The actual device stream. */
- std::unique_ptr<V4L2VideoDevice> dev_;
-
-	/* Tracks a unique id key for the bufferMap_. */
- IdGenerator id_;
-
- /* All frame buffers associated with this device stream. */
- BufferMap bufferMap_;
-
- /*
- * List of frame buffers that we can use if none have been provided by
- * the application for external streams. This is populated by the
- * buffers exported internally.
- */
- std::queue<FrameBuffer *> availableBuffers_;
-
- /*
- * List of frame buffers that are to be queued into the device from a Request.
- * A nullptr indicates any internal buffer can be used (from availableBuffers_),
- * whereas a valid pointer indicates an external buffer to be queued.
- *
-	 * The ordering of buffers to be queued is important here, as it must
-	 * match the order of requests coming from the application.
- */
- std::queue<FrameBuffer *> requestBuffers_;
-
- /*
-	 * This is a list of buffers exported internally. We need to keep it
-	 * around as the stream must maintain ownership of these buffers.
- */
- std::vector<std::unique_ptr<FrameBuffer>> internalBuffers_;
-};
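
The IdGenerator nested in the class above is a small free-list allocator: ids count upwards until one is released, and released ids are then handed out first. A standalone sketch of the same behaviour and its use:

#include <cassert>
#include <queue>

class IdGenerator
{
public:
	explicit IdGenerator(int max) : max_(max), id_(0) {}

	int get()
	{
		if (!recycle_.empty()) {
			int id = recycle_.front();
			recycle_.pop();
			return id;
		}

		assert(id_ < max_);
		return id_++;
	}

	void release(int id) { recycle_.push(id); }

private:
	int max_;
	int id_;
	std::queue<int> recycle_;
};

int main()
{
	IdGenerator gen(4);
	int a = gen.get(); /* 0 */
	int b = gen.get(); /* 1 */
	gen.release(a);
	assert(gen.get() == a); /* recycled ids are reused first */
	assert(b == 1);
	return 0;
}
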
-
-/*
- * The following class is just a convenient (and typesafe) array of device
- * streams indexed with an enum class.
- */
-template<typename E, std::size_t N>
-class Device : public std::array<class Stream, N>
-{
-private:
- constexpr auto index(E e) const noexcept
- {
- return static_cast<std::underlying_type_t<E>>(e);
- }
-public:
- Stream &operator[](E e)
- {
- return std::array<class Stream, N>::operator[](index(e));
- }
- const Stream &operator[](E e) const
- {
- return std::array<class Stream, N>::operator[](index(e));
- }
-};
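
The enum-indexed array above is what lets the pipeline handler write things like unicam_[Unicam::Image] or isp_[Isp::Stats] rather than raw indices. A minimal standalone version of the same idea, with the Stream payload reduced to a plain string for illustration:

#include <array>
#include <cstddef>
#include <iostream>
#include <string>
#include <type_traits>

template<typename E, std::size_t N>
class Device : public std::array<std::string, N>
{
public:
	std::string &operator[](E e)
	{
		return std::array<std::string, N>::operator[](
			static_cast<std::underlying_type_t<E>>(e));
	}
};

enum class Unicam { Image, Embedded };

int main()
{
	Device<Unicam, 2> unicam;
	unicam[Unicam::Image] = "unicam-image";
	unicam[Unicam::Embedded] = "unicam-embedded";

	std::cout << unicam[Unicam::Image] << std::endl;
	return 0;
}
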
-
-} /* namespace RPi */
-
-} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rkisp1/rkisp1.cpp b/src/libcamera/pipeline/rkisp1/rkisp1.cpp
index 3dc0850c..4cbf105d 100644
--- a/src/libcamera/pipeline/rkisp1/rkisp1.cpp
+++ b/src/libcamera/pipeline/rkisp1/rkisp1.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * rkisp1.cpp - Pipeline handler for Rockchip ISP1
+ * Pipeline handler for Rockchip ISP1
*/
#include <algorithm>
@@ -13,24 +13,29 @@
#include <queue>
#include <linux/media-bus-format.h>
+#include <linux/rkisp1-config.h>
#include <libcamera/base/log.h>
#include <libcamera/base/utils.h>
#include <libcamera/camera.h>
+#include <libcamera/color_space.h>
#include <libcamera/control_ids.h>
#include <libcamera/formats.h>
#include <libcamera/framebuffer.h>
+#include <libcamera/request.h>
+#include <libcamera/stream.h>
+#include <libcamera/transform.h>
+
#include <libcamera/ipa/core_ipa_interface.h>
#include <libcamera/ipa/rkisp1_ipa_interface.h>
#include <libcamera/ipa/rkisp1_ipa_proxy.h>
-#include <libcamera/request.h>
-#include <libcamera/stream.h>
#include "libcamera/internal/camera.h"
#include "libcamera/internal/camera_sensor.h"
#include "libcamera/internal/delayed_controls.h"
#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/framebuffer.h"
#include "libcamera/internal/ipa_manager.h"
#include "libcamera/internal/media_device.h"
#include "libcamera/internal/pipeline_handler.h"
@@ -64,7 +69,8 @@ class RkISP1Frames
public:
RkISP1Frames(PipelineHandler *pipe);
- RkISP1FrameInfo *create(const RkISP1CameraData *data, Request *request);
+ RkISP1FrameInfo *create(const RkISP1CameraData *data, Request *request,
+ bool isRaw);
int destroy(unsigned int frame);
void clear();
@@ -119,6 +125,7 @@ public:
Status validate() override;
const V4L2SubdeviceFormat &sensorFormat() { return sensorFormat_; }
+ const Transform &combinedTransform() { return combinedTransform_; }
private:
bool fitsAllPaths(const StreamConfiguration &cfg);
@@ -132,6 +139,7 @@ private:
const RkISP1CameraData *data_;
V4L2SubdeviceFormat sensorFormat_;
+ Transform combinedTransform_;
};
class PipelineHandlerRkISP1 : public PipelineHandler
@@ -139,8 +147,8 @@ class PipelineHandlerRkISP1 : public PipelineHandler
public:
PipelineHandlerRkISP1(CameraManager *manager);
- CameraConfiguration *generateConfiguration(Camera *camera,
- const StreamRoles &roles) override;
+ std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles) override;
int configure(Camera *camera, CameraConfiguration *config) override;
int exportFrameBuffers(Camera *camera, Stream *stream,
@@ -154,6 +162,8 @@ public:
bool match(DeviceEnumerator *enumerator) override;
private:
+ static constexpr Size kRkISP1PreviewSize = { 1920, 1080 };
+
RkISP1CameraData *cameraData(Camera *camera)
{
return static_cast<RkISP1CameraData *>(camera->_d());
@@ -165,7 +175,7 @@ private:
int initLinks(Camera *camera, const CameraSensor *sensor,
const RkISP1CameraConfiguration &config);
int createCamera(MediaEntity *sensor);
- void tryCompleteRequest(Request *request);
+ void tryCompleteRequest(RkISP1FrameInfo *info);
void bufferReady(FrameBuffer *buffer);
void paramReady(FrameBuffer *buffer);
void statReady(FrameBuffer *buffer);
@@ -178,6 +188,10 @@ private:
std::unique_ptr<V4L2Subdevice> isp_;
std::unique_ptr<V4L2VideoDevice> param_;
std::unique_ptr<V4L2VideoDevice> stat_;
+ std::unique_ptr<V4L2Subdevice> csi_;
+
+ bool hasSelfPath_;
+ bool isRaw_;
RkISP1MainPath mainPath_;
RkISP1SelfPath selfPath_;
@@ -188,6 +202,8 @@ private:
std::queue<FrameBuffer *> availableStatBuffers_;
Camera *activeCamera_;
+
+ const MediaPad *ispSink_;
};
RkISP1Frames::RkISP1Frames(PipelineHandler *pipe)
@@ -195,28 +211,35 @@ RkISP1Frames::RkISP1Frames(PipelineHandler *pipe)
{
}
-RkISP1FrameInfo *RkISP1Frames::create(const RkISP1CameraData *data, Request *request)
+RkISP1FrameInfo *RkISP1Frames::create(const RkISP1CameraData *data, Request *request,
+ bool isRaw)
{
unsigned int frame = data->frame_;
- if (pipe_->availableParamBuffers_.empty()) {
- LOG(RkISP1, Error) << "Parameters buffer underrun";
- return nullptr;
- }
- FrameBuffer *paramBuffer = pipe_->availableParamBuffers_.front();
+ FrameBuffer *paramBuffer = nullptr;
+ FrameBuffer *statBuffer = nullptr;
- if (pipe_->availableStatBuffers_.empty()) {
- LOG(RkISP1, Error) << "Statisitc buffer underrun";
- return nullptr;
+ if (!isRaw) {
+ if (pipe_->availableParamBuffers_.empty()) {
+ LOG(RkISP1, Error) << "Parameters buffer underrun";
+ return nullptr;
+ }
+
+ if (pipe_->availableStatBuffers_.empty()) {
+ LOG(RkISP1, Error) << "Statistic buffer underrun";
+ return nullptr;
+ }
+
+ paramBuffer = pipe_->availableParamBuffers_.front();
+ pipe_->availableParamBuffers_.pop();
+
+ statBuffer = pipe_->availableStatBuffers_.front();
+ pipe_->availableStatBuffers_.pop();
}
- FrameBuffer *statBuffer = pipe_->availableStatBuffers_.front();
FrameBuffer *mainPathBuffer = request->findBuffer(&data->mainPathStream_);
FrameBuffer *selfPathBuffer = request->findBuffer(&data->selfPathStream_);
- pipe_->availableParamBuffers_.pop();
- pipe_->availableStatBuffers_.pop();
-
RkISP1FrameInfo *info = new RkISP1FrameInfo;
info->frame = frame;
@@ -323,7 +346,7 @@ int RkISP1CameraData::loadIPA(unsigned int hwRevision)
/*
* The API tuning file is made from the sensor name unless the
- * environment variable overrides it. If
+ * environment variable overrides it.
*/
std::string ipaTuningFile;
char const *configFromEnv = utils::secure_getenv("LIBCAMERA_RKISP1_TUNING_FILE");
@@ -339,7 +362,15 @@ int RkISP1CameraData::loadIPA(unsigned int hwRevision)
ipaTuningFile = std::string(configFromEnv);
}
- int ret = ipa_->init({ ipaTuningFile, sensor_->model() }, hwRevision);
+ IPACameraSensorInfo sensorInfo{};
+ int ret = sensor_->sensorInfo(&sensorInfo);
+ if (ret) {
+ LOG(RkISP1, Error) << "Camera sensor information not available";
+ return ret;
+ }
+
+ ret = ipa_->init({ ipaTuningFile, sensor_->model() }, hwRevision,
+ sensorInfo, sensor_->controls(), &controlInfo_);
if (ret < 0) {
LOG(RkISP1, Error) << "IPA initialization failure";
return ret;
@@ -355,13 +386,15 @@ void RkISP1CameraData::paramFilled(unsigned int frame)
if (!info)
return;
+ info->paramBuffer->_d()->metadata().planes()[0].bytesused =
+ sizeof(struct rkisp1_params_cfg);
pipe->param_->queueBuffer(info->paramBuffer);
pipe->stat_->queueBuffer(info->statBuffer);
if (info->mainPathBuffer)
mainPath_->queueBuffer(info->mainPathBuffer);
- if (info->selfPathBuffer)
+ if (selfPath_ && info->selfPathBuffer)
selfPath_->queueBuffer(info->selfPathBuffer);
}
@@ -380,9 +413,33 @@ void RkISP1CameraData::metadataReady(unsigned int frame, const ControlList &meta
info->request->metadata().merge(metadata);
info->metadataProcessed = true;
- pipe()->tryCompleteRequest(info->request);
+ pipe()->tryCompleteRequest(info);
}
+/* -----------------------------------------------------------------------------
+ * Camera Configuration
+ */
+
+namespace {
+
+/* Keep in sync with the supported raw formats in rkisp1_path.cpp. */
+const std::map<PixelFormat, uint32_t> rawFormats = {
+ { formats::SBGGR8, MEDIA_BUS_FMT_SBGGR8_1X8 },
+ { formats::SGBRG8, MEDIA_BUS_FMT_SGBRG8_1X8 },
+ { formats::SGRBG8, MEDIA_BUS_FMT_SGRBG8_1X8 },
+ { formats::SRGGB8, MEDIA_BUS_FMT_SRGGB8_1X8 },
+ { formats::SBGGR10, MEDIA_BUS_FMT_SBGGR10_1X10 },
+ { formats::SGBRG10, MEDIA_BUS_FMT_SGBRG10_1X10 },
+ { formats::SGRBG10, MEDIA_BUS_FMT_SGRBG10_1X10 },
+ { formats::SRGGB10, MEDIA_BUS_FMT_SRGGB10_1X10 },
+ { formats::SBGGR12, MEDIA_BUS_FMT_SBGGR12_1X12 },
+ { formats::SGBRG12, MEDIA_BUS_FMT_SGBRG12_1X12 },
+ { formats::SGRBG12, MEDIA_BUS_FMT_SGRBG12_1X12 },
+ { formats::SRGGB12, MEDIA_BUS_FMT_SRGGB12_1X12 },
+};
+
+} /* namespace */
+
RkISP1CameraConfiguration::RkISP1CameraConfiguration(Camera *camera,
RkISP1CameraData *data)
: CameraConfiguration()
@@ -393,14 +450,15 @@ RkISP1CameraConfiguration::RkISP1CameraConfiguration(Camera *camera,
bool RkISP1CameraConfiguration::fitsAllPaths(const StreamConfiguration &cfg)
{
+ const CameraSensor *sensor = data_->sensor_.get();
StreamConfiguration config;
config = cfg;
- if (data_->mainPath_->validate(&config) != Valid)
+ if (data_->mainPath_->validate(sensor, &config) != Valid)
return false;
config = cfg;
- if (data_->selfPath_->validate(&config) != Valid)
+ if (data_->selfPath_ && data_->selfPath_->validate(sensor, &config) != Valid)
return false;
return true;
@@ -409,20 +467,38 @@ bool RkISP1CameraConfiguration::fitsAllPaths(const StreamConfiguration &cfg)
CameraConfiguration::Status RkISP1CameraConfiguration::validate()
{
const CameraSensor *sensor = data_->sensor_.get();
- Status status = Valid;
+ unsigned int pathCount = data_->selfPath_ ? 2 : 1;
+ Status status;
if (config_.empty())
return Invalid;
- if (transform != Transform::Identity) {
- transform = Transform::Identity;
+ status = validateColorSpaces(ColorSpaceFlag::StreamsShareColorSpace);
+
+ /* Cap the number of entries to the available streams. */
+ if (config_.size() > pathCount) {
+ config_.resize(pathCount);
status = Adjusted;
}
- /* Cap the number of entries to the available streams. */
- if (config_.size() > 2) {
- config_.resize(2);
+ Orientation requestedOrientation = orientation;
+ combinedTransform_ = data_->sensor_->computeTransform(&orientation);
+ if (orientation != requestedOrientation)
status = Adjusted;
+
+ /*
+ * Simultaneous capture of raw and processed streams isn't possible. If
+ * there is any raw stream, cap the number of streams to one.
+ */
+ if (config_.size() > 1) {
+ for (const auto &cfg : config_) {
+ if (PixelFormatInfo::info(cfg.pixelFormat).colourEncoding ==
+ PixelFormatInfo::ColourEncodingRAW) {
+ config_.resize(1);
+ status = Adjusted;
+ break;
+ }
+ }
}
/*
@@ -438,14 +514,14 @@ CameraConfiguration::Status RkISP1CameraConfiguration::validate()
std::reverse(order.begin(), order.end());
bool mainPathAvailable = true;
- bool selfPathAvailable = true;
+ bool selfPathAvailable = data_->selfPath_;
for (unsigned int index : order) {
StreamConfiguration &cfg = config_[index];
/* Try to match stream without adjusting configuration. */
if (mainPathAvailable) {
StreamConfiguration tryCfg = cfg;
- if (data_->mainPath_->validate(&tryCfg) == Valid) {
+ if (data_->mainPath_->validate(sensor, &tryCfg) == Valid) {
mainPathAvailable = false;
cfg = tryCfg;
cfg.setStream(const_cast<Stream *>(&data_->mainPathStream_));
@@ -455,7 +531,7 @@ CameraConfiguration::Status RkISP1CameraConfiguration::validate()
if (selfPathAvailable) {
StreamConfiguration tryCfg = cfg;
- if (data_->selfPath_->validate(&tryCfg) == Valid) {
+ if (data_->selfPath_->validate(sensor, &tryCfg) == Valid) {
selfPathAvailable = false;
cfg = tryCfg;
cfg.setStream(const_cast<Stream *>(&data_->selfPathStream_));
@@ -466,7 +542,7 @@ CameraConfiguration::Status RkISP1CameraConfiguration::validate()
/* Try to match stream allowing adjusting configuration. */
if (mainPathAvailable) {
StreamConfiguration tryCfg = cfg;
- if (data_->mainPath_->validate(&tryCfg) == Adjusted) {
+ if (data_->mainPath_->validate(sensor, &tryCfg) == Adjusted) {
mainPathAvailable = false;
cfg = tryCfg;
cfg.setStream(const_cast<Stream *>(&data_->mainPathStream_));
@@ -477,7 +553,7 @@ CameraConfiguration::Status RkISP1CameraConfiguration::validate()
if (selfPathAvailable) {
StreamConfiguration tryCfg = cfg;
- if (data_->selfPath_->validate(&tryCfg) == Adjusted) {
+ if (data_->selfPath_->validate(sensor, &tryCfg) == Adjusted) {
selfPathAvailable = false;
cfg = tryCfg;
cfg.setStream(const_cast<Stream *>(&data_->selfPathStream_));
@@ -486,86 +562,147 @@ CameraConfiguration::Status RkISP1CameraConfiguration::validate()
}
}
- /* All paths rejected configuraiton. */
+ /* All paths rejected configuration. */
LOG(RkISP1, Debug) << "Camera configuration not supported "
<< cfg.toString();
return Invalid;
}
/* Select the sensor format. */
+ PixelFormat rawFormat;
Size maxSize;
- for (const StreamConfiguration &cfg : config_)
+
+ for (const StreamConfiguration &cfg : config_) {
+ const PixelFormatInfo &info = PixelFormatInfo::info(cfg.pixelFormat);
+ if (info.colourEncoding == PixelFormatInfo::ColourEncodingRAW)
+ rawFormat = cfg.pixelFormat;
+
maxSize = std::max(maxSize, cfg.size);
+ }
+
+ std::vector<unsigned int> mbusCodes;
+
+ if (rawFormat.isValid()) {
+ mbusCodes = { rawFormats.at(rawFormat) };
+ } else {
+ std::transform(rawFormats.begin(), rawFormats.end(),
+ std::back_inserter(mbusCodes),
+ [](const auto &value) { return value.second; });
+ }
+
+ sensorFormat_ = sensor->getFormat(mbusCodes, maxSize);
- sensorFormat_ = sensor->getFormat({ MEDIA_BUS_FMT_SBGGR12_1X12,
- MEDIA_BUS_FMT_SGBRG12_1X12,
- MEDIA_BUS_FMT_SGRBG12_1X12,
- MEDIA_BUS_FMT_SRGGB12_1X12,
- MEDIA_BUS_FMT_SBGGR10_1X10,
- MEDIA_BUS_FMT_SGBRG10_1X10,
- MEDIA_BUS_FMT_SGRBG10_1X10,
- MEDIA_BUS_FMT_SRGGB10_1X10,
- MEDIA_BUS_FMT_SBGGR8_1X8,
- MEDIA_BUS_FMT_SGBRG8_1X8,
- MEDIA_BUS_FMT_SGRBG8_1X8,
- MEDIA_BUS_FMT_SRGGB8_1X8 },
- maxSize);
if (sensorFormat_.size.isNull())
sensorFormat_.size = sensor->resolution();
return status;
}
-PipelineHandlerRkISP1::PipelineHandlerRkISP1(CameraManager *manager)
- : PipelineHandler(manager)
-{
-}
-
/* -----------------------------------------------------------------------------
* Pipeline Operations
*/
-CameraConfiguration *PipelineHandlerRkISP1::generateConfiguration(Camera *camera,
- const StreamRoles &roles)
+PipelineHandlerRkISP1::PipelineHandlerRkISP1(CameraManager *manager)
+ : PipelineHandler(manager), hasSelfPath_(true)
+{
+}
+
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerRkISP1::generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles)
{
RkISP1CameraData *data = cameraData(camera);
- CameraConfiguration *config = new RkISP1CameraConfiguration(camera, data);
+
+ unsigned int pathCount = data->selfPath_ ? 2 : 1;
+ if (roles.size() > pathCount) {
+ LOG(RkISP1, Error) << "Too many stream roles requested";
+ return nullptr;
+ }
+
+ std::unique_ptr<CameraConfiguration> config =
+ std::make_unique<RkISP1CameraConfiguration>(camera, data);
if (roles.empty())
return config;
+ /*
+ * As the ISP can't output different color spaces for the main and self
+ * path, pick a sensible default color space based on the role of the
+ * first stream and use it for all streams.
+ */
+ std::optional<ColorSpace> colorSpace;
bool mainPathAvailable = true;
- bool selfPathAvailable = true;
+
for (const StreamRole role : roles) {
- bool useMainPath;
+ Size size;
switch (role) {
- case StreamRole::StillCapture: {
- useMainPath = mainPathAvailable;
+ case StreamRole::StillCapture:
+ /* JPEG encoders typically expect sYCC. */
+ if (!colorSpace)
+ colorSpace = ColorSpace::Sycc;
+
+ size = data->sensor_->resolution();
break;
- }
+
case StreamRole::Viewfinder:
- case StreamRole::VideoRecording: {
- useMainPath = !selfPathAvailable;
+ /*
+ * sYCC is the YCbCr encoding of sRGB, which is commonly
+ * used by displays.
+ */
+ if (!colorSpace)
+ colorSpace = ColorSpace::Sycc;
+
+ size = kRkISP1PreviewSize;
break;
- }
+
+ case StreamRole::VideoRecording:
+ /* Rec. 709 is a good default for HD video recording. */
+ if (!colorSpace)
+ colorSpace = ColorSpace::Rec709;
+
+ size = kRkISP1PreviewSize;
+ break;
+
+ case StreamRole::Raw:
+ if (roles.size() > 1) {
+ LOG(RkISP1, Error)
+ << "Can't capture both raw and processed streams";
+ return nullptr;
+ }
+
+ colorSpace = ColorSpace::Raw;
+ size = data->sensor_->resolution();
+ break;
+
default:
LOG(RkISP1, Warning)
<< "Requested stream role not supported: " << role;
- delete config;
return nullptr;
}
- StreamConfiguration cfg;
- if (useMainPath) {
- cfg = data->mainPath_->generateConfiguration(
- data->sensor_->resolution());
+ /*
+ * Prefer the main path if available, as it supports higher
+ * resolutions.
+ *
+ * \todo Using the main path unconditionally hides support for
+		 * RGB (only available on the self path) in the stream formats
+		 * exposed to applications. This likely calls for a better API
+		 * to expose stream capabilities.
+ */
+ RkISP1Path *path;
+ if (mainPathAvailable) {
+ path = data->mainPath_;
mainPathAvailable = false;
} else {
- cfg = data->selfPath_->generateConfiguration(
- data->sensor_->resolution());
- selfPathAvailable = false;
+ path = data->selfPath_;
}
+ StreamConfiguration cfg =
+ path->generateConfiguration(data->sensor_.get(), size, role);
+ if (!cfg.pixelFormat.isValid())
+ return nullptr;
+
+ cfg.colorSpace = colorSpace;
config->addConfiguration(cfg);
}
@@ -593,12 +730,18 @@ int PipelineHandlerRkISP1::configure(Camera *camera, CameraConfiguration *c)
V4L2SubdeviceFormat format = config->sensorFormat();
LOG(RkISP1, Debug) << "Configuring sensor with " << format;
- ret = sensor->setFormat(&format);
+ ret = sensor->setFormat(&format, config->combinedTransform());
if (ret < 0)
return ret;
LOG(RkISP1, Debug) << "Sensor configured with " << format;
+ if (csi_) {
+ ret = csi_->setFormat(0, &format);
+ if (ret < 0)
+ return ret;
+ }
+
ret = isp_->setFormat(0, &format);
if (ret < 0)
return ret;
@@ -612,8 +755,14 @@ int PipelineHandlerRkISP1::configure(Camera *camera, CameraConfiguration *c)
<< "ISP input pad configured with " << format
<< " crop " << rect;
+ const PixelFormat &streamFormat = config->at(0).pixelFormat;
+ const PixelFormatInfo &info = PixelFormatInfo::info(streamFormat);
+ isRaw_ = info.colourEncoding == PixelFormatInfo::ColourEncodingRAW;
+
/* YUYV8_2X8 is required on the ISP source path pad for YUV output. */
- format.mbus_code = MEDIA_BUS_FMT_YUYV8_2X8;
+ if (!isRaw_)
+ format.code = MEDIA_BUS_FMT_YUYV8_2X8;
+
LOG(RkISP1, Debug)
<< "Configuring ISP output pad with " << format
<< " crop " << rect;
@@ -622,6 +771,7 @@ int PipelineHandlerRkISP1::configure(Camera *camera, CameraConfiguration *c)
if (ret < 0)
return ret;
+ format.colorSpace = config->at(0).colorSpace;
ret = isp_->setFormat(2, &format);
if (ret < 0)
return ret;
@@ -637,10 +787,12 @@ int PipelineHandlerRkISP1::configure(Camera *camera, CameraConfiguration *c)
ret = mainPath_.configure(cfg, format);
streamConfig[0] = IPAStream(cfg.pixelFormat,
cfg.size);
- } else {
+ } else if (hasSelfPath_) {
ret = selfPath_.configure(cfg, format);
streamConfig[1] = IPAStream(cfg.pixelFormat,
cfg.size);
+ } else {
+ return -ENODEV;
}
if (ret)
@@ -660,19 +812,15 @@ int PipelineHandlerRkISP1::configure(Camera *camera, CameraConfiguration *c)
return ret;
/* Inform IPA of stream configuration and sensor controls. */
- IPACameraSensorInfo sensorInfo = {};
- ret = data->sensor_->sensorInfo(&sensorInfo);
- if (ret) {
- /* \todo Turn this into a hard failure. */
- LOG(RkISP1, Warning) << "Camera sensor information not available";
- sensorInfo = {};
- ret = 0;
- }
+ ipa::rkisp1::IPAConfigInfo ipaConfig{};
+
+ ret = data->sensor_->sensorInfo(&ipaConfig.sensorInfo);
+ if (ret)
+ return ret;
- std::map<uint32_t, ControlInfoMap> entityControls;
- entityControls.emplace(0, data->sensor_->controls());
+ ipaConfig.sensorControls = data->sensor_->controls();
- ret = data->ipa_->configure(sensorInfo, streamConfig, entityControls);
+ ret = data->ipa_->configure(ipaConfig, streamConfig, &data->controlInfo_);
if (ret) {
LOG(RkISP1, Error) << "failed configuring IPA (" << ret << ")";
return ret;
@@ -688,7 +836,7 @@ int PipelineHandlerRkISP1::exportFrameBuffers([[maybe_unused]] Camera *camera, S
if (stream == &data->mainPathStream_)
return mainPath_.exportBuffers(count, buffers);
- else if (stream == &data->selfPathStream_)
+ else if (hasSelfPath_ && stream == &data->selfPathStream_)
return selfPath_.exportBuffers(count, buffers);
return -EINVAL;
@@ -705,13 +853,15 @@ int PipelineHandlerRkISP1::allocateBuffers(Camera *camera)
data->selfPathStream_.configuration().bufferCount,
});
- ret = param_->allocateBuffers(maxCount, &paramBuffers_);
- if (ret < 0)
- goto error;
+ if (!isRaw_) {
+ ret = param_->allocateBuffers(maxCount, &paramBuffers_);
+ if (ret < 0)
+ goto error;
- ret = stat_->allocateBuffers(maxCount, &statBuffers_);
- if (ret < 0)
- goto error;
+ ret = stat_->allocateBuffers(maxCount, &statBuffers_);
+ if (ret < 0)
+ goto error;
+ }
for (std::unique_ptr<FrameBuffer> &buffer : paramBuffers_) {
buffer->setCookie(ipaBufferId++);
@@ -787,23 +937,25 @@ int PipelineHandlerRkISP1::start(Camera *camera, [[maybe_unused]] const ControlL
data->frame_ = 0;
- ret = param_->streamOn();
- if (ret) {
- data->ipa_->stop();
- freeBuffers(camera);
- LOG(RkISP1, Error)
- << "Failed to start parameters " << camera->id();
- return ret;
- }
+ if (!isRaw_) {
+ ret = param_->streamOn();
+ if (ret) {
+ data->ipa_->stop();
+ freeBuffers(camera);
+ LOG(RkISP1, Error)
+ << "Failed to start parameters " << camera->id();
+ return ret;
+ }
- ret = stat_->streamOn();
- if (ret) {
- param_->streamOff();
- data->ipa_->stop();
- freeBuffers(camera);
- LOG(RkISP1, Error)
- << "Failed to start statistics " << camera->id();
- return ret;
+ ret = stat_->streamOn();
+ if (ret) {
+ param_->streamOff();
+ data->ipa_->stop();
+ freeBuffers(camera);
+ LOG(RkISP1, Error)
+ << "Failed to start statistics " << camera->id();
+ return ret;
+ }
}
if (data->mainPath_->isEnabled()) {
@@ -817,7 +969,7 @@ int PipelineHandlerRkISP1::start(Camera *camera, [[maybe_unused]] const ControlL
}
}
- if (data->selfPath_->isEnabled()) {
+ if (hasSelfPath_ && data->selfPath_->isEnabled()) {
ret = selfPath_.start();
if (ret) {
mainPath_.stop();
@@ -844,18 +996,21 @@ void PipelineHandlerRkISP1::stopDevice(Camera *camera)
data->ipa_->stop();
- selfPath_.stop();
+ if (hasSelfPath_)
+ selfPath_.stop();
mainPath_.stop();
- ret = stat_->streamOff();
- if (ret)
- LOG(RkISP1, Warning)
- << "Failed to stop statistics for " << camera->id();
+ if (!isRaw_) {
+ ret = stat_->streamOff();
+ if (ret)
+ LOG(RkISP1, Warning)
+ << "Failed to stop statistics for " << camera->id();
- ret = param_->streamOff();
- if (ret)
- LOG(RkISP1, Warning)
- << "Failed to stop parameters for " << camera->id();
+ ret = param_->streamOff();
+ if (ret)
+ LOG(RkISP1, Warning)
+ << "Failed to stop parameters for " << camera->id();
+ }
ASSERT(data->queuedRequests_.empty());
data->frameInfo_.clear();
@@ -869,12 +1024,21 @@ int PipelineHandlerRkISP1::queueRequestDevice(Camera *camera, Request *request)
{
RkISP1CameraData *data = cameraData(camera);
- RkISP1FrameInfo *info = data->frameInfo_.create(data, request);
+ RkISP1FrameInfo *info = data->frameInfo_.create(data, request, isRaw_);
if (!info)
return -ENOENT;
data->ipa_->queueRequest(data->frame_, request->controls());
- data->ipa_->fillParamsBuffer(data->frame_, info->paramBuffer->cookie());
+ if (isRaw_) {
+ if (info->mainPathBuffer)
+ data->mainPath_->queueBuffer(info->mainPathBuffer);
+
+ if (data->selfPath_ && info->selfPathBuffer)
+ data->selfPath_->queueBuffer(info->selfPathBuffer);
+ } else {
+ data->ipa_->fillParamsBuffer(data->frame_,
+ info->paramBuffer->cookie());
+ }
data->frame_++;
@@ -900,8 +1064,7 @@ int PipelineHandlerRkISP1::initLinks(Camera *camera,
* Configure the sensor links: enable the link corresponding to this
* camera.
*/
- const MediaPad *pad = isp_->entity()->getPadByIndex(0);
- for (MediaLink *link : pad->links()) {
+ for (MediaLink *link : ispSink_->links()) {
if (link->source()->entity() != sensor->entity())
continue;
@@ -915,10 +1078,18 @@ int PipelineHandlerRkISP1::initLinks(Camera *camera,
return ret;
}
+ if (csi_) {
+ MediaLink *link = isp_->entity()->getPadByIndex(0)->links().at(0);
+
+ ret = link->setEnabled(true);
+ if (ret < 0)
+ return ret;
+ }
+
for (const StreamConfiguration &cfg : config) {
if (cfg.stream() == &data->mainPathStream_)
ret = data->mainPath_->setEnabled(true);
- else if (cfg.stream() == &data->selfPathStream_)
+ else if (hasSelfPath_ && cfg.stream() == &data->selfPathStream_)
ret = data->selfPath_->setEnabled(true);
else
return -EINVAL;
@@ -935,15 +1106,8 @@ int PipelineHandlerRkISP1::createCamera(MediaEntity *sensor)
int ret;
std::unique_ptr<RkISP1CameraData> data =
- std::make_unique<RkISP1CameraData>(this, &mainPath_, &selfPath_);
-
- ControlInfoMap::Map ctrls;
- ctrls.emplace(std::piecewise_construct,
- std::forward_as_tuple(&controls::AeEnable),
- std::forward_as_tuple(false, true));
-
- data->controlInfo_ = ControlInfoMap(std::move(ctrls),
- controls::controls);
+ std::make_unique<RkISP1CameraData>(this, &mainPath_,
+ hasSelfPath_ ? &selfPath_ : nullptr);
data->sensor_ = std::make_unique<CameraSensor>(sensor);
ret = data->sensor_->init();
@@ -991,9 +1155,7 @@ bool PipelineHandlerRkISP1::match(DeviceEnumerator *enumerator)
DeviceMatch dm("rkisp1");
dm.add("rkisp1_isp");
- dm.add("rkisp1_resizer_selfpath");
dm.add("rkisp1_resizer_mainpath");
- dm.add("rkisp1_selfpath");
dm.add("rkisp1_mainpath");
dm.add("rkisp1_stats");
dm.add("rkisp1_params");
@@ -1008,11 +1170,29 @@ bool PipelineHandlerRkISP1::match(DeviceEnumerator *enumerator)
return false;
}
+ hasSelfPath_ = !!media_->getEntityByName("rkisp1_selfpath");
+
/* Create the V4L2 subdevices we will need. */
isp_ = V4L2Subdevice::fromEntityName(media_, "rkisp1_isp");
if (isp_->open() < 0)
return false;
+ /* Locate and open the optional CSI-2 receiver. */
+ ispSink_ = isp_->entity()->getPadByIndex(0);
+ if (!ispSink_ || ispSink_->links().empty())
+ return false;
+
+ pad = ispSink_->links().at(0)->source();
+ if (pad->entity()->function() == MEDIA_ENT_F_VID_IF_BRIDGE) {
+ csi_ = std::make_unique<V4L2Subdevice>(pad->entity());
+ if (csi_->open() < 0)
+ return false;
+
+ ispSink_ = csi_->entity()->getPadByIndex(0);
+ if (!ispSink_)
+ return false;
+ }
+
/* Locate and open the stats and params video nodes. */
stat_ = V4L2VideoDevice::fromEntityName(media_, "rkisp1_stats");
if (stat_->open() < 0)
@@ -1026,11 +1206,12 @@ bool PipelineHandlerRkISP1::match(DeviceEnumerator *enumerator)
if (!mainPath_.init(media_))
return false;
- if (!selfPath_.init(media_))
+ if (hasSelfPath_ && !selfPath_.init(media_))
return false;
mainPath_.bufferReady().connect(this, &PipelineHandlerRkISP1::bufferReady);
- selfPath_.bufferReady().connect(this, &PipelineHandlerRkISP1::bufferReady);
+ if (hasSelfPath_)
+ selfPath_.bufferReady().connect(this, &PipelineHandlerRkISP1::bufferReady);
stat_->bufferReady.connect(this, &PipelineHandlerRkISP1::statReady);
param_->bufferReady.connect(this, &PipelineHandlerRkISP1::paramReady);
@@ -1038,12 +1219,8 @@ bool PipelineHandlerRkISP1::match(DeviceEnumerator *enumerator)
* Enumerate all sensors connected to the ISP and create one
* camera instance for each of them.
*/
- pad = isp_->entity()->getPadByIndex(0);
- if (!pad)
- return false;
-
bool registered = false;
- for (MediaLink *link : pad->links()) {
+ for (MediaLink *link : ispSink_->links()) {
if (!createCamera(link->source()->entity()))
registered = true;
}
@@ -1055,12 +1232,10 @@ bool PipelineHandlerRkISP1::match(DeviceEnumerator *enumerator)
* Buffer Handling
*/
-void PipelineHandlerRkISP1::tryCompleteRequest(Request *request)
+void PipelineHandlerRkISP1::tryCompleteRequest(RkISP1FrameInfo *info)
{
RkISP1CameraData *data = cameraData(activeCamera_);
- RkISP1FrameInfo *info = data->frameInfo_.find(request);
- if (!info)
- return;
+ Request *request = info->request;
if (request->hasPendingBuffers())
return;
@@ -1068,7 +1243,7 @@ void PipelineHandlerRkISP1::tryCompleteRequest(Request *request)
if (!info->metadataProcessed)
return;
- if (!info->paramDequeued)
+ if (!isRaw_ && !info->paramDequeued)
return;
data->frameInfo_.destroy(info->frame);
@@ -1078,19 +1253,38 @@ void PipelineHandlerRkISP1::tryCompleteRequest(Request *request)
void PipelineHandlerRkISP1::bufferReady(FrameBuffer *buffer)
{
+ ASSERT(activeCamera_);
+ RkISP1CameraData *data = cameraData(activeCamera_);
+
+ RkISP1FrameInfo *info = data->frameInfo_.find(buffer);
+ if (!info)
+ return;
+
+ const FrameMetadata &metadata = buffer->metadata();
Request *request = buffer->request();
- /*
- * Record the sensor's timestamp in the request metadata.
- *
- * \todo The sensor timestamp should be better estimated by connecting
- * to the V4L2Device::frameStart signal.
- */
- request->metadata().set(controls::SensorTimestamp,
- buffer->metadata().timestamp);
+ if (metadata.status != FrameMetadata::FrameCancelled) {
+ /*
+ * Record the sensor's timestamp in the request metadata.
+ *
+ * \todo The sensor timestamp should be better estimated by connecting
+ * to the V4L2Device::frameStart signal.
+ */
+ request->metadata().set(controls::SensorTimestamp,
+ metadata.timestamp);
+
+ if (isRaw_) {
+ const ControlList &ctrls =
+ data->delayedCtrls_->get(metadata.sequence);
+ data->ipa_->processStatsBuffer(info->frame, 0, ctrls);
+ }
+ } else {
+ if (isRaw_)
+ info->metadataProcessed = true;
+ }
completeBuffer(request, buffer);
- tryCompleteRequest(request);
+ tryCompleteRequest(info);
}
void PipelineHandlerRkISP1::paramReady(FrameBuffer *buffer)
@@ -1103,7 +1297,7 @@ void PipelineHandlerRkISP1::paramReady(FrameBuffer *buffer)
return;
info->paramDequeued = true;
- tryCompleteRequest(info->request);
+ tryCompleteRequest(info);
}
void PipelineHandlerRkISP1::statReady(FrameBuffer *buffer)
@@ -1117,7 +1311,7 @@ void PipelineHandlerRkISP1::statReady(FrameBuffer *buffer)
if (buffer->metadata().status == FrameMetadata::FrameCancelled) {
info->metadataProcessed = true;
- tryCompleteRequest(info->request);
+ tryCompleteRequest(info);
return;
}
@@ -1128,6 +1322,6 @@ void PipelineHandlerRkISP1::statReady(FrameBuffer *buffer)
data->delayedCtrls_->get(buffer->metadata().sequence));
}
-REGISTER_PIPELINE_HANDLER(PipelineHandlerRkISP1)
+REGISTER_PIPELINE_HANDLER(PipelineHandlerRkISP1, "rkisp1")
} /* namespace libcamera */
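
With the raw path support introduced above, an application can now ask this pipeline for a StreamRole::Raw configuration and capture Bayer frames straight from the sensor. A hedged application-side sketch (camera is assumed to be an already acquired libcamera::Camera, and error handling is minimal):

#include <cerrno>
#include <memory>
#include <vector>

#include <libcamera/libcamera.h>

int configureRawCapture(std::shared_ptr<libcamera::Camera> camera)
{
	using namespace libcamera;

	std::vector<StreamRole> roles{ StreamRole::Raw };
	std::unique_ptr<CameraConfiguration> config =
		camera->generateConfiguration(roles);
	if (!config || config->empty())
		return -EINVAL;

	/* The pipeline selects a sensor Bayer format at full resolution. */
	if (config->validate() == CameraConfiguration::Invalid)
		return -EINVAL;

	return camera->configure(config.get());
}
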
diff --git a/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp b/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
index 6f175758..c49017d1 100644
--- a/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
+++ b/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * rkisp1path.cpp - Rockchip ISP1 path helper
+ * Rockchip ISP1 path helper
*/
#include "rkisp1_path.h"
@@ -12,6 +12,7 @@
#include <libcamera/formats.h>
#include <libcamera/stream.h>
+#include "libcamera/internal/camera_sensor.h"
#include "libcamera/internal/media_device.h"
#include "libcamera/internal/v4l2_subdevice.h"
#include "libcamera/internal/v4l2_videodevice.h"
@@ -20,6 +21,39 @@ namespace libcamera {
LOG_DECLARE_CATEGORY(RkISP1)
+namespace {
+
+/* Keep in sync with the supported raw formats in rkisp1.cpp. */
+const std::map<PixelFormat, uint32_t> formatToMediaBus = {
+ { formats::UYVY, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::YUYV, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::NV12, MEDIA_BUS_FMT_YUYV8_1_5X8 },
+ { formats::NV21, MEDIA_BUS_FMT_YUYV8_1_5X8 },
+ { formats::NV16, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::NV61, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::YUV420, MEDIA_BUS_FMT_YUYV8_1_5X8 },
+ { formats::YVU420, MEDIA_BUS_FMT_YUYV8_1_5X8 },
+ { formats::YUV422, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::YVU422, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::R8, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::RGB565, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::XRGB8888, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::SBGGR8, MEDIA_BUS_FMT_SBGGR8_1X8 },
+ { formats::SGBRG8, MEDIA_BUS_FMT_SGBRG8_1X8 },
+ { formats::SGRBG8, MEDIA_BUS_FMT_SGRBG8_1X8 },
+ { formats::SRGGB8, MEDIA_BUS_FMT_SRGGB8_1X8 },
+ { formats::SBGGR10, MEDIA_BUS_FMT_SBGGR10_1X10 },
+ { formats::SGBRG10, MEDIA_BUS_FMT_SGBRG10_1X10 },
+ { formats::SGRBG10, MEDIA_BUS_FMT_SGRBG10_1X10 },
+ { formats::SRGGB10, MEDIA_BUS_FMT_SRGGB10_1X10 },
+ { formats::SBGGR12, MEDIA_BUS_FMT_SBGGR12_1X12 },
+ { formats::SGBRG12, MEDIA_BUS_FMT_SGBRG12_1X12 },
+ { formats::SGRBG12, MEDIA_BUS_FMT_SGRBG12_1X12 },
+ { formats::SRGGB12, MEDIA_BUS_FMT_SRGGB12_1X12 },
+};
+
+} /* namespace */
+
RkISP1Path::RkISP1Path(const char *name, const Span<const PixelFormat> &formats,
const Size &minResolution, const Size &maxResolution)
: name_(name), running_(false), formats_(formats),
@@ -41,6 +75,8 @@ bool RkISP1Path::init(MediaDevice *media)
if (video_->open() < 0)
return false;
+ populateFormats();
+
link_ = media->link("rkisp1_isp", 2, resizer, 0);
if (!link_)
return false;
@@ -48,40 +84,227 @@ bool RkISP1Path::init(MediaDevice *media)
return true;
}
-StreamConfiguration RkISP1Path::generateConfiguration(const Size &resolution)
+void RkISP1Path::populateFormats()
{
+ V4L2VideoDevice::Formats v4l2Formats = video_->formats();
+ if (v4l2Formats.empty()) {
+ LOG(RkISP1, Info)
+ << "Failed to enumerate supported formats and sizes, using defaults";
+
+ for (const PixelFormat &format : formats_)
+ streamFormats_.insert(format);
+ return;
+ }
+
+ minResolution_ = { 65535, 65535 };
+ maxResolution_ = { 0, 0 };
+
+ std::vector<PixelFormat> formats;
+ for (const auto &[format, sizes] : v4l2Formats) {
+ const PixelFormat pixelFormat = format.toPixelFormat();
+
+ /*
+ * As a defensive measure, skip any pixel format exposed by the
+ * driver that we don't know about. This ensures that looking up
+ * formats in formatToMediaBus using a key from streamFormats_
+ * will never fail in any of the other functions.
+ */
+ if (!formatToMediaBus.count(pixelFormat)) {
+ LOG(RkISP1, Warning)
+ << "Unsupported pixel format " << pixelFormat;
+ continue;
+ }
+
+ streamFormats_.insert(pixelFormat);
+
+ for (const auto &size : sizes) {
+ if (minResolution_ > size.min)
+ minResolution_ = size.min;
+ if (maxResolution_ < size.max)
+ maxResolution_ = size.max;
+ }
+ }
+}
+
+StreamConfiguration
+RkISP1Path::generateConfiguration(const CameraSensor *sensor, const Size &size,
+ StreamRole role)
+{
+ const std::vector<unsigned int> &mbusCodes = sensor->mbusCodes();
+ const Size &resolution = sensor->resolution();
+
+ /* Min and max resolutions to populate the available stream formats. */
Size maxResolution = maxResolution_.boundedToAspectRatio(resolution)
.boundedTo(resolution);
Size minResolution = minResolution_.expandedToAspectRatio(resolution);
+ /* The desired stream size, bound to the max resolution. */
+ Size streamSize = size.boundedTo(maxResolution);
+
+ /* Create the list of supported stream formats. */
std::map<PixelFormat, std::vector<SizeRange>> streamFormats;
- for (const PixelFormat &format : formats_)
- streamFormats[format] = { { minResolution, maxResolution } };
+ unsigned int rawBitsPerPixel = 0;
+ PixelFormat rawFormat;
+
+ for (const auto &format : streamFormats_) {
+ const PixelFormatInfo &info = PixelFormatInfo::info(format);
+
+ /* Populate stream formats for non-RAW configurations. */
+ if (info.colourEncoding != PixelFormatInfo::ColourEncodingRAW) {
+ if (role == StreamRole::Raw)
+ continue;
+
+ streamFormats[format] = { { minResolution, maxResolution } };
+ continue;
+ }
+
+ /* Skip RAW formats for non-raw roles. */
+ if (role != StreamRole::Raw)
+ continue;
+
+ /* Populate stream formats for RAW configurations. */
+ uint32_t mbusCode = formatToMediaBus.at(format);
+ if (std::find(mbusCodes.begin(), mbusCodes.end(), mbusCode) ==
+ mbusCodes.end())
+ /* Skip formats not supported by sensor. */
+ continue;
+
+ /* Add all the RAW sizes the sensor can produce for this code. */
+ for (const auto &rawSize : sensor->sizes(mbusCode)) {
+ if (rawSize.width > maxResolution_.width ||
+ rawSize.height > maxResolution_.height)
+ continue;
+
+ streamFormats[format].push_back({ rawSize, rawSize });
+ }
+
+ /*
+ * Store the raw format with the highest bits per pixel for
+ * later usage.
+ */
+ if (info.bitsPerPixel > rawBitsPerPixel) {
+ rawBitsPerPixel = info.bitsPerPixel;
+ rawFormat = format;
+ }
+ }
+
+ /*
+ * Pick a suitable pixel format for the role. Raw streams need to use a
+ * raw format, processed streams use NV12 by default.
+ */
+ PixelFormat format;
+
+ if (role == StreamRole::Raw) {
+ if (!rawFormat.isValid()) {
+ LOG(RkISP1, Error)
+ << "Sensor " << sensor->model()
+ << " doesn't support raw capture";
+ return {};
+ }
+
+ format = rawFormat;
+ } else {
+ format = formats::NV12;
+ }
StreamFormats formats(streamFormats);
StreamConfiguration cfg(formats);
- cfg.pixelFormat = formats::NV12;
- cfg.size = maxResolution;
+ cfg.pixelFormat = format;
+ cfg.size = streamSize;
cfg.bufferCount = RKISP1_BUFFER_COUNT;
return cfg;
}
-CameraConfiguration::Status RkISP1Path::validate(StreamConfiguration *cfg)
+CameraConfiguration::Status RkISP1Path::validate(const CameraSensor *sensor,
+ StreamConfiguration *cfg)
{
+ const std::vector<unsigned int> &mbusCodes = sensor->mbusCodes();
+ const Size &resolution = sensor->resolution();
+
const StreamConfiguration reqCfg = *cfg;
CameraConfiguration::Status status = CameraConfiguration::Valid;
- if (std::find(formats_.begin(), formats_.end(), cfg->pixelFormat) ==
- formats_.end())
- cfg->pixelFormat = formats::NV12;
+ /*
+ * Validate the pixel format. If the requested format isn't supported,
+ * default to either NV12 (all versions of the ISP are guaranteed to
+ * support NV12 on both the main and self paths) if the requested format
+ * is not a raw format, or to the supported raw format with the highest
+ * bits per pixel otherwise.
+ */
+ unsigned int rawBitsPerPixel = 0;
+ PixelFormat rawFormat;
+ bool found = false;
+
+ for (const auto &format : streamFormats_) {
+ const PixelFormatInfo &info = PixelFormatInfo::info(format);
+
+ if (info.colourEncoding == PixelFormatInfo::ColourEncodingRAW) {
+ /* Skip raw formats not supported by the sensor. */
+ uint32_t mbusCode = formatToMediaBus.at(format);
+ if (std::find(mbusCodes.begin(), mbusCodes.end(), mbusCode) ==
+ mbusCodes.end())
+ continue;
+
+ /*
+ * Store the raw format with the highest bits per pixel
+ * for later usage.
+ */
+ if (info.bitsPerPixel > rawBitsPerPixel) {
+ rawBitsPerPixel = info.bitsPerPixel;
+ rawFormat = format;
+ }
+ }
+
+ if (cfg->pixelFormat == format) {
+ found = true;
+ break;
+ }
+ }
+
+ bool isRaw = PixelFormatInfo::info(cfg->pixelFormat).colourEncoding ==
+ PixelFormatInfo::ColourEncodingRAW;
+
+ /*
+ * If no raw format supported by the sensor has been found, use a
+ * processed format.
+ */
+ if (!rawFormat.isValid())
+ isRaw = false;
+
+ if (!found)
+ cfg->pixelFormat = isRaw ? rawFormat : formats::NV12;
+
+ Size minResolution;
+ Size maxResolution;
+
+ if (isRaw) {
+ /*
+ * Use the sensor output size closest to the requested stream
+ * size.
+ */
+ uint32_t mbusCode = formatToMediaBus.at(cfg->pixelFormat);
+ V4L2SubdeviceFormat sensorFormat =
+ sensor->getFormat({ mbusCode }, cfg->size);
+
+ minResolution = sensorFormat.size;
+ maxResolution = sensorFormat.size;
+ } else {
+ /*
+ * Adjust the size based on the sensor resolution and absolute
+ * limits of the ISP.
+ */
+ minResolution = minResolution_.expandedToAspectRatio(resolution);
+ maxResolution = maxResolution_.boundedToAspectRatio(resolution)
+ .boundedTo(resolution);
+ }
- cfg->size.boundTo(maxResolution_);
- cfg->size.expandTo(minResolution_);
+ cfg->size.boundTo(maxResolution);
+ cfg->size.expandTo(minResolution);
cfg->bufferCount = RKISP1_BUFFER_COUNT;
V4L2DeviceFormat format;
- format.fourcc = V4L2PixelFormat::fromPixelFormat(cfg->pixelFormat);
+ format.fourcc = video_->toV4L2PixelFormat(cfg->pixelFormat);
format.size = cfg->size;
int ret = video_->tryFormat(&format);
@@ -112,7 +335,18 @@ int RkISP1Path::configure(const StreamConfiguration &config,
if (ret < 0)
return ret;
- Rectangle rect(0, 0, ispFormat.size);
+ /*
+ * Crop on the resizer input to maintain FOV before downscaling.
+ *
+ * \todo The alignment to a multiple of 2 pixels is required but may
+ * change the aspect ratio very slightly. A more advanced algorithm to
+ * compute the resizer input crop rectangle is needed, and it should
+ * also take into account the need to crop away the edge pixels affected
+ * by the ISP processing blocks.
+ */
+ Size ispCrop = inputFormat.size.boundedToAspectRatio(config.size)
+ .alignedUpTo(2, 2);
+ Rectangle rect = ispCrop.centeredTo(Rectangle(inputFormat.size).center());
ret = resizer_->setSelection(0, V4L2_SEL_TGT_CROP, &rect);
if (ret < 0)
return ret;
@@ -127,15 +361,11 @@ int RkISP1Path::configure(const StreamConfiguration &config,
<< "Configuring " << name_ << " resizer output pad with "
<< ispFormat;
- switch (config.pixelFormat) {
- case formats::NV12:
- case formats::NV21:
- ispFormat.mbus_code = MEDIA_BUS_FMT_YUYV8_1_5X8;
- break;
- default:
- ispFormat.mbus_code = MEDIA_BUS_FMT_YUYV8_2X8;
- break;
- }
+ /*
+ * The configuration has been validated, so the pixel format is guaranteed
+ * to be supported and thus found in formatToMediaBus.
+ */
+ ispFormat.code = formatToMediaBus.at(config.pixelFormat);
ret = resizer_->setFormat(1, &ispFormat);
if (ret < 0)
@@ -147,7 +377,7 @@ int RkISP1Path::configure(const StreamConfiguration &config,
const PixelFormatInfo &info = PixelFormatInfo::info(config.pixelFormat);
V4L2DeviceFormat outputFormat;
- outputFormat.fourcc = V4L2PixelFormat::fromPixelFormat(config.pixelFormat);
+ outputFormat.fourcc = video_->toV4L2PixelFormat(config.pixelFormat);
outputFormat.size = config.size;
outputFormat.planesCount = info.numPlanes();
@@ -156,7 +386,7 @@ int RkISP1Path::configure(const StreamConfiguration &config,
return ret;
if (outputFormat.size != config.size ||
- outputFormat.fourcc != V4L2PixelFormat::fromPixelFormat(config.pixelFormat)) {
+ outputFormat.fourcc != video_->toV4L2PixelFormat(config.pixelFormat)) {
LOG(RkISP1, Error)
<< "Unable to configure capture in " << config.toString();
return -EINVAL;
@@ -204,17 +434,32 @@ void RkISP1Path::stop()
running_ = false;
}
+/*
+ * \todo Remove the hardcoded resolutions and formats once all users have
+ * migrated to a recent enough kernel.
+ */
namespace {
constexpr Size RKISP1_RSZ_MP_SRC_MIN{ 32, 16 };
constexpr Size RKISP1_RSZ_MP_SRC_MAX{ 4416, 3312 };
-constexpr std::array<PixelFormat, 6> RKISP1_RSZ_MP_FORMATS{
+constexpr std::array<PixelFormat, 18> RKISP1_RSZ_MP_FORMATS{
formats::YUYV,
formats::NV16,
formats::NV61,
formats::NV21,
formats::NV12,
formats::R8,
- /* \todo Add support for RAW formats. */
+ formats::SBGGR8,
+ formats::SGBRG8,
+ formats::SGRBG8,
+ formats::SRGGB8,
+ formats::SBGGR10,
+ formats::SGBRG10,
+ formats::SGRBG10,
+ formats::SRGGB10,
+ formats::SBGGR12,
+ formats::SGBRG12,
+ formats::SGRBG12,
+ formats::SRGGB12,
};
constexpr Size RKISP1_RSZ_SP_SRC_MIN{ 32, 16 };
diff --git a/src/libcamera/pipeline/rkisp1/rkisp1_path.h b/src/libcamera/pipeline/rkisp1/rkisp1_path.h
index f3f1ae39..08edefec 100644
--- a/src/libcamera/pipeline/rkisp1/rkisp1_path.h
+++ b/src/libcamera/pipeline/rkisp1/rkisp1_path.h
@@ -2,12 +2,13 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * rkisp1path.h - Rockchip ISP1 path helper
+ * Rockchip ISP1 path helper
*/
#pragma once
#include <memory>
+#include <set>
#include <vector>
#include <libcamera/base/signal.h>
@@ -22,6 +23,7 @@
namespace libcamera {
+class CameraSensor;
class MediaDevice;
class V4L2Subdevice;
struct StreamConfiguration;
@@ -38,8 +40,11 @@ public:
int setEnabled(bool enable) { return link_->setEnabled(enable); }
bool isEnabled() const { return link_->flags() & MEDIA_LNK_FL_ENABLED; }
- StreamConfiguration generateConfiguration(const Size &resolution);
- CameraConfiguration::Status validate(StreamConfiguration *cfg);
+ StreamConfiguration generateConfiguration(const CameraSensor *sensor,
+ const Size &resolution,
+ StreamRole role);
+ CameraConfiguration::Status validate(const CameraSensor *sensor,
+ StreamConfiguration *cfg);
int configure(const StreamConfiguration &config,
const V4L2SubdeviceFormat &inputFormat);
@@ -57,14 +62,17 @@ public:
Signal<FrameBuffer *> &bufferReady() { return video_->bufferReady; }
private:
+ void populateFormats();
+
static constexpr unsigned int RKISP1_BUFFER_COUNT = 4;
const char *name_;
bool running_;
const Span<const PixelFormat> formats_;
- const Size minResolution_;
- const Size maxResolution_;
+ std::set<PixelFormat> streamFormats_;
+ Size minResolution_;
+ Size maxResolution_;
std::unique_ptr<V4L2Subdevice> resizer_;
std::unique_ptr<V4L2VideoDevice> video_;
diff --git a/src/libcamera/pipeline/rpi/common/delayed_controls.cpp b/src/libcamera/pipeline/rpi/common/delayed_controls.cpp
new file mode 100644
index 00000000..ad50a7c8
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/common/delayed_controls.cpp
@@ -0,0 +1,293 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * Helper to deal with controls that take effect with a delay
+ *
+ * Note: This has been forked from the libcamera core implementation.
+ */
+
+#include "delayed_controls.h"
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/controls.h>
+
+#include "libcamera/internal/v4l2_device.h"
+
+/**
+ * \file delayed_controls.h
+ * \brief Helper to deal with controls that take effect with a delay
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(RPiDelayedControls)
+
+namespace RPi {
+
+/**
+ * \class DelayedControls
+ * \brief Helper to deal with controls that take effect with a delay
+ *
+ * Some sensor controls take effect with a delay as the sensor needs time to
+ * adjust, for example exposure and analog gain. This is a helper class to deal
+ * with such controls and the intended users are pipeline handlers.
+ *
+ * The idea is to extend the concept of the buffer depth of a pipeline that
+ * the application needs to maintain to also cover controls. Just as with
+ * buffer depth, if the application keeps the number of requests queued above
+ * the control depth, the controls are guaranteed to take effect for the
+ * correct request. The control depth is determined by the control with the
+ * greatest delay.
+ */
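+
+/*
+ * A minimal usage sketch for pipeline handler authors (illustrative only;
+ * the control ids, delay values and variable names below are assumptions,
+ * not taken from a real sensor configuration):
+ *
+ *   std::unordered_map<uint32_t, RPi::DelayedControls::ControlParams> params = {
+ *       { V4L2_CID_ANALOGUE_GAIN, { 1, false } },
+ *       { V4L2_CID_EXPOSURE, { 2, false } },
+ *       { V4L2_CID_VBLANK, { 2, true } },
+ *   };
+ *   RPi::DelayedControls delayed(sensorDevice, params);
+ *
+ *   delayed.push(controls, cookie);    // when a request is queued
+ *   delayed.applyControls(sequence);   // on every start-of-frame event
+ *   auto [ctrls, id] = delayed.get(sequence);  // controls in effect for a frame
+ */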
+
+/**
+ * \struct DelayedControls::ControlParams
+ * \brief Parameters associated with controls handled by the \a DelayedControls
+ * helper class
+ *
+ * \var ControlParams::delay
+ * \brief Frame delay from setting the control on a sensor device to when it is
+ * consumed during framing.
+ *
+ * \var ControlParams::priorityWrite
+ * \brief Flag to indicate that this control must be applied ahead of, and
+ * separately from, the other controls.
+ *
+ * Typically set for the \a V4L2_CID_VBLANK control so that the device driver
+ * does not reject \a V4L2_CID_EXPOSURE control values that may be outside of
+ * the existing vertical blanking specified bounds, but are within the new
+ * blanking bounds.
+ */
+
+/**
+ * \brief Construct a DelayedControls instance
+ * \param[in] device The V4L2 device the controls have to be applied to
+ * \param[in] controlParams Map of the numerical V4L2 control ids to their
+ * associated control parameters.
+ *
+ * The control parameters comprise delays (in frames) and a priority write
+ * flag. If this flag is set, the relevant control is written separately from,
+ * and ahead of the rest of the batched controls.
+ *
+ * Only controls specified in \a controlParams are handled. If it's desired to
+ * mix delayed controls and controls that take effect immediately, the immediate
+ * controls must be listed in the \a controlParams map with a delay value of 0.
+ */
+DelayedControls::DelayedControls(V4L2Device *device,
+ const std::unordered_map<uint32_t, ControlParams> &controlParams)
+ : device_(device), maxDelay_(0)
+{
+ const ControlInfoMap &controls = device_->controls();
+
+ /*
+ * Create a map of control ids to delays for controls exposed by the
+ * device.
+ */
+ for (auto const &param : controlParams) {
+ auto it = controls.find(param.first);
+ if (it == controls.end()) {
+ LOG(RPiDelayedControls, Error)
+ << "Delay request for control id "
+ << utils::hex(param.first)
+ << " but control is not exposed by device "
+ << device_->deviceNode();
+ continue;
+ }
+
+ const ControlId *id = it->first;
+
+ controlParams_[id] = param.second;
+
+ LOG(RPiDelayedControls, Debug)
+ << "Set a delay of " << controlParams_[id].delay
+ << " and priority write flag " << controlParams_[id].priorityWrite
+ << " for " << id->name();
+
+ maxDelay_ = std::max(maxDelay_, controlParams_[id].delay);
+ }
+
+ reset(0);
+}
+
+/**
+ * \brief Reset state machine
+ * \param[in] cookie User supplied cookie to associate with the initial control values
+ *
+ * Resets the state machine to a starting position based on control values
+ * retrieved from the device.
+ */
+void DelayedControls::reset(unsigned int cookie)
+{
+ queueCount_ = 1;
+ writeCount_ = 0;
+ cookies_[0] = cookie;
+
+ /* Retrieve controls as reported by the device. */
+ std::vector<uint32_t> ids;
+ for (auto const &param : controlParams_)
+ ids.push_back(param.first->id());
+
+ ControlList controls = device_->getControls(ids);
+
+ /* Seed the control queue with the controls reported by the device. */
+ values_.clear();
+ for (const auto &ctrl : controls) {
+ const ControlId *id = device_->controls().idmap().at(ctrl.first);
+ /*
+ * Do not mark this control value as updated, it does not need
+ * to be written to to device on startup.
+ */
+ values_[id][0] = Info(ctrl.second, false);
+ }
+}
+
+/**
+ * \brief Push a set of controls on the queue
+ * \param[in] controls List of controls to add to the device queue
+ * \param[in] cookie Opaque cookie to associate with \a controls, later returned by get()
+ *
+ * Push a set of controls to the control queue. This increases the control queue
+ * depth by one.
+ *
+ * \returns true if \a controls are accepted, or false otherwise
+ */
+bool DelayedControls::push(const ControlList &controls, const unsigned int cookie)
+{
+ /* Copy state from previous frame. */
+ for (auto &ctrl : values_) {
+ Info &info = ctrl.second[queueCount_];
+ info = values_[ctrl.first][queueCount_ - 1];
+ info.updated = false;
+ }
+
+ /* Update with new controls. */
+ const ControlIdMap &idmap = device_->controls().idmap();
+ for (const auto &control : controls) {
+ const auto &it = idmap.find(control.first);
+ if (it == idmap.end()) {
+ LOG(RPiDelayedControls, Warning)
+ << "Unknown control " << control.first;
+ return false;
+ }
+
+ const ControlId *id = it->second;
+
+ if (controlParams_.find(id) == controlParams_.end())
+ return false;
+
+ Info &info = values_[id][queueCount_];
+
+ info = Info(control.second);
+
+ LOG(RPiDelayedControls, Debug)
+ << "Queuing " << id->name()
+ << " to " << info.toString()
+ << " at index " << queueCount_;
+ }
+
+ cookies_[queueCount_] = cookie;
+ queueCount_++;
+
+ return true;
+}
+
+/**
+ * \brief Read back controls in effect at a sequence number
+ * \param[in] sequence The sequence number to get controls for
+ *
+ * Read back which controls were in effect at a specific sequence number. The
+ * history is a ring buffer of 16 entries where new and old values coexist.
+ * It's the caller's responsibility not to read sequence numbers so old that
+ * they have been pushed out of the history.
+ *
+ * Historic values are evicted by pushing new values onto the queue using
+ * push(). The maximum history depth from the current sequence number that
+ * yields valid values is thus 16 minus the number of controls pushed.
+ *
+ * \return The controls in effect at \a sequence, and the cookie associated with them
+ */
+std::pair<ControlList, unsigned int> DelayedControls::get(uint32_t sequence)
+{
+ unsigned int index = std::max<int>(0, sequence - maxDelay_);
+
+ ControlList out(device_->controls());
+ for (const auto &ctrl : values_) {
+ const ControlId *id = ctrl.first;
+ const Info &info = ctrl.second[index];
+
+ out.set(id->id(), info);
+
+ LOG(RPiDelayedControls, Debug)
+ << "Reading " << id->name()
+ << " to " << info.toString()
+ << " at index " << index;
+ }
+
+ return { out, cookies_[index] };
+}
+
+/**
+ * \brief Inform DelayedControls of the start of a new frame
+ * \param[in] sequence Sequence number of the frame that started
+ *
+ * Inform the state machine that a new frame has started and of its sequence
+ * number. Any user of these helpers is responsible for informing the helper
+ * about the start of every frame. This can easily be connected to the start
+ * of exposure (SOE) V4L2 event.
+ */
+void DelayedControls::applyControls(uint32_t sequence)
+{
+ LOG(RPiDelayedControls, Debug) << "frame " << sequence << " started";
+
+ /*
+ * Create a control list, peeking ahead in the value queue to ensure
+ * values are set in time to satisfy the sensor delay.
+ */
+ ControlList out(device_->controls());
+ for (auto &ctrl : values_) {
+ const ControlId *id = ctrl.first;
+ unsigned int delayDiff = maxDelay_ - controlParams_[id].delay;
+ unsigned int index = std::max<int>(0, writeCount_ - delayDiff);
+ Info &info = ctrl.second[index];
+
+ if (info.updated) {
+ if (controlParams_[id].priorityWrite) {
+ /*
+ * This control must be written now, it could
+ * affect validity of the other controls.
+ */
+ ControlList priority(device_->controls());
+ priority.set(id->id(), info);
+ device_->setControls(&priority);
+ } else {
+ /*
+ * Batch up the list of controls and write them
+ * at the end of the function.
+ */
+ out.set(id->id(), info);
+ }
+
+ LOG(RPiDelayedControls, Debug)
+ << "Setting " << id->name()
+ << " to " << info.toString()
+ << " at index " << index;
+
+ /* Done with this update, so mark as completed. */
+ info.updated = false;
+ }
+ }
+
+ writeCount_ = sequence + 1;
+
+ while (writeCount_ > queueCount_) {
+ LOG(RPiDelayedControls, Debug)
+ << "Queue is empty, auto queue no-op.";
+ push({}, cookies_[queueCount_ - 1]);
+ }
+
+ device_->setControls(&out);
+}
+
+} /* namespace RPi */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rpi/common/delayed_controls.h b/src/libcamera/pipeline/rpi/common/delayed_controls.h
new file mode 100644
index 00000000..487b0057
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/common/delayed_controls.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * Helper to deal with controls that take effect with a delay
+ *
+ * Note: This has been forked from the libcamera core implementation.
+ */
+
+#pragma once
+
+#include <stdint.h>
+#include <unordered_map>
+#include <utility>
+
+#include <libcamera/controls.h>
+
+namespace libcamera {
+
+class V4L2Device;
+
+namespace RPi {
+
+class DelayedControls
+{
+public:
+ struct ControlParams {
+ unsigned int delay;
+ bool priorityWrite;
+ };
+
+ DelayedControls(V4L2Device *device,
+ const std::unordered_map<uint32_t, ControlParams> &controlParams);
+
+ void reset(unsigned int cookie);
+
+ bool push(const ControlList &controls, unsigned int cookie);
+ std::pair<ControlList, unsigned int> get(uint32_t sequence);
+
+ void applyControls(uint32_t sequence);
+
+private:
+ class Info : public ControlValue
+ {
+ public:
+ Info()
+ : updated(false)
+ {
+ }
+
+ Info(const ControlValue &v, bool updated_ = true)
+ : ControlValue(v), updated(updated_)
+ {
+ }
+
+ bool updated;
+ };
+
+ static constexpr int listSize = 16;
+ template<typename T>
+ class RingBuffer : public std::array<T, listSize>
+ {
+ public:
+ T &operator[](unsigned int index)
+ {
+ return std::array<T, listSize>::operator[](index % listSize);
+ }
+
+ const T &operator[](unsigned int index) const
+ {
+ return std::array<T, listSize>::operator[](index % listSize);
+ }
+ };
+
+ V4L2Device *device_;
+ std::unordered_map<const ControlId *, ControlParams> controlParams_;
+ unsigned int maxDelay_;
+
+ uint32_t queueCount_;
+ uint32_t writeCount_;
+ std::unordered_map<const ControlId *, RingBuffer<Info>> values_;
+ RingBuffer<unsigned int> cookies_;
+};
+
+} /* namespace RPi */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/raspberrypi/meson.build b/src/libcamera/pipeline/rpi/common/meson.build
index f1a2f5ee..8fb7e823 100644
--- a/src/libcamera/pipeline/raspberrypi/meson.build
+++ b/src/libcamera/pipeline/rpi/common/meson.build
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: CC0-1.0
libcamera_sources += files([
- 'dma_heaps.cpp',
- 'raspberrypi.cpp',
+ 'delayed_controls.cpp',
+ 'pipeline_base.cpp',
'rpi_stream.cpp',
])
diff --git a/src/libcamera/pipeline/rpi/common/pipeline_base.cpp b/src/libcamera/pipeline/rpi/common/pipeline_base.cpp
new file mode 100644
index 00000000..289af516
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/common/pipeline_base.cpp
@@ -0,0 +1,1491 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019-2023, Raspberry Pi Ltd
+ *
+ * Pipeline handler base class for Raspberry Pi devices
+ */
+
+#include "pipeline_base.h"
+
+#include <chrono>
+
+#include <linux/media-bus-format.h>
+#include <linux/videodev2.h>
+
+#include <libcamera/base/file.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/formats.h>
+#include <libcamera/logging.h>
+#include <libcamera/property_ids.h>
+
+#include "libcamera/internal/camera_lens.h"
+#include "libcamera/internal/ipa_manager.h"
+#include "libcamera/internal/v4l2_subdevice.h"
+
+using namespace std::chrono_literals;
+
+namespace libcamera {
+
+using namespace RPi;
+
+LOG_DEFINE_CATEGORY(RPI)
+
+using StreamFlag = RPi::Stream::StreamFlag;
+
+namespace {
+
+constexpr unsigned int defaultRawBitDepth = 12;
+
+PixelFormat mbusCodeToPixelFormat(unsigned int code,
+ BayerFormat::Packing packingReq)
+{
+ BayerFormat bayer = BayerFormat::fromMbusCode(code);
+
+ ASSERT(bayer.isValid());
+
+ bayer.packing = packingReq;
+ PixelFormat pix = bayer.toPixelFormat();
+
+ /*
+ * Not all formats (e.g. 8-bit or 16-bit Bayer formats) can have packed
+ * variants. So if the PixelFormat returns as invalid, use the non-packed
+ * conversion instead.
+ */
+ if (!pix.isValid()) {
+ bayer.packing = BayerFormat::Packing::None;
+ pix = bayer.toPixelFormat();
+ }
+
+ return pix;
+}
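+
+/*
+ * For instance (illustrative of the fallback above): MEDIA_BUS_FMT_SRGGB10_1X10
+ * with CSI2 packing maps to formats::SRGGB10_CSI2P, while an 8-bit code such as
+ * MEDIA_BUS_FMT_SRGGB8_1X8 has no packed variant and falls back to the unpacked
+ * formats::SRGGB8.
+ */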
+
+bool isMonoSensor(std::unique_ptr<CameraSensor> &sensor)
+{
+ unsigned int mbusCode = sensor->mbusCodes()[0];
+ const BayerFormat &bayer = BayerFormat::fromMbusCode(mbusCode);
+
+ return bayer.order == BayerFormat::Order::MONO;
+}
+
+const std::vector<ColorSpace> validColorSpaces = {
+ ColorSpace::Sycc,
+ ColorSpace::Smpte170m,
+ ColorSpace::Rec709
+};
+
+std::optional<ColorSpace> findValidColorSpace(const ColorSpace &colourSpace)
+{
+ for (auto cs : validColorSpaces) {
+ if (colourSpace.primaries == cs.primaries &&
+ colourSpace.transferFunction == cs.transferFunction)
+ return cs;
+ }
+
+ return std::nullopt;
+}
+
+} /* namespace */
+
+/*
+ * Raspberry Pi drivers expect the following colour spaces:
+ * - V4L2_COLORSPACE_RAW for raw streams.
+ * - One of V4L2_COLORSPACE_JPEG, V4L2_COLORSPACE_SMPTE170M, V4L2_COLORSPACE_REC709 for
+ * non-raw streams. Other fields such as transfer function, YCbCr encoding and
+ * quantisation are not used.
+ *
+ * The corresponding libcamera colour spaces that we wish to use are therefore:
+ * - ColorSpace::Raw for V4L2_COLORSPACE_RAW
+ * - ColorSpace::Sycc for V4L2_COLORSPACE_JPEG
+ * - ColorSpace::Smpte170m for V4L2_COLORSPACE_SMPTE170M
+ * - ColorSpace::Rec709 for V4L2_COLORSPACE_REC709
+ */
+CameraConfiguration::Status RPiCameraConfiguration::validateColorSpaces([[maybe_unused]] ColorSpaceFlags flags)
+{
+ Status status = Valid;
+ yuvColorSpace_.reset();
+
+ for (auto cfg : config_) {
+ /* First fix up raw streams to have the "raw" colour space. */
+ if (PipelineHandlerBase::isRaw(cfg.pixelFormat)) {
+ /* If there was no value here, that doesn't count as "adjusted". */
+ if (cfg.colorSpace && cfg.colorSpace != ColorSpace::Raw)
+ status = Adjusted;
+ cfg.colorSpace = ColorSpace::Raw;
+ continue;
+ }
+
+ /* Next we need to find our shared colour space. The first valid one will do. */
+ if (cfg.colorSpace && !yuvColorSpace_)
+ yuvColorSpace_ = findValidColorSpace(cfg.colorSpace.value());
+ }
+
+ /* If no colour space was given anywhere, choose sYCC. */
+ if (!yuvColorSpace_)
+ yuvColorSpace_ = ColorSpace::Sycc;
+
+ /* Note the version of this that any RGB streams will have to use. */
+ rgbColorSpace_ = yuvColorSpace_;
+ rgbColorSpace_->ycbcrEncoding = ColorSpace::YcbcrEncoding::None;
+ rgbColorSpace_->range = ColorSpace::Range::Full;
+
+ /* Go through the streams again and force everyone to the same colour space. */
+ for (auto cfg : config_) {
+ if (cfg.colorSpace == ColorSpace::Raw)
+ continue;
+
+ if (PipelineHandlerBase::isYuv(cfg.pixelFormat) && cfg.colorSpace != yuvColorSpace_) {
+ /* Again, no value means "not adjusted". */
+ if (cfg.colorSpace)
+ status = Adjusted;
+ cfg.colorSpace = yuvColorSpace_;
+ }
+ if (PipelineHandlerBase::isRgb(cfg.pixelFormat) && cfg.colorSpace != rgbColorSpace_) {
+ /* Be nice, and let the YUV version count as non-adjusted too. */
+ if (cfg.colorSpace && cfg.colorSpace != yuvColorSpace_)
+ status = Adjusted;
+ cfg.colorSpace = rgbColorSpace_;
+ }
+ }
+
+ return status;
+}
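+
+/*
+ * An illustrative example of the rules above (not an exhaustive
+ * specification): given one raw stream, one YUV stream requesting Rec709 and
+ * one RGB stream, the raw stream is forced to ColorSpace::Raw, Rec709 becomes
+ * the shared YUV colour space, and the RGB stream receives the same colour
+ * space with YCbCrEncoding::None and a full quantisation range.
+ */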
+
+CameraConfiguration::Status RPiCameraConfiguration::validate()
+{
+ Status status = Valid;
+
+ if (config_.empty())
+ return Invalid;
+
+ /*
+ * Make sure that if a sensor configuration has been requested it
+ * is valid.
+ */
+ if (sensorConfig && !sensorConfig->isValid()) {
+ LOG(RPI, Error) << "Invalid sensor configuration request";
+ return Invalid;
+ }
+
+ status = validateColorSpaces(ColorSpaceFlag::StreamsShareColorSpace);
+
+ /*
+ * Validate the requested transform against the sensor capabilities and
+ * rotation and store the final combined transform that configure() will
+ * need to apply to the sensor to save us working it out again.
+ */
+ Orientation requestedOrientation = orientation;
+ combinedTransform_ = data_->sensor_->computeTransform(&orientation);
+ if (orientation != requestedOrientation)
+ status = Adjusted;
+
+ rawStreams_.clear();
+ outStreams_.clear();
+
+ for (const auto &[index, cfg] : utils::enumerate(config_)) {
+ if (PipelineHandlerBase::isRaw(cfg.pixelFormat))
+ rawStreams_.emplace_back(index, &cfg);
+ else
+ outStreams_.emplace_back(index, &cfg);
+ }
+
+ /* Sort the streams so the highest resolution is first. */
+ std::sort(rawStreams_.begin(), rawStreams_.end(),
+ [](auto &l, auto &r) { return l.cfg->size > r.cfg->size; });
+
+ std::sort(outStreams_.begin(), outStreams_.end(),
+ [](auto &l, auto &r) { return l.cfg->size > r.cfg->size; });
+
+ /* Compute the sensor's format then do any platform specific fixups. */
+ unsigned int bitDepth;
+ Size sensorSize;
+
+ if (sensorConfig) {
+ /* Use the application provided sensor configuration. */
+ bitDepth = sensorConfig->bitDepth;
+ sensorSize = sensorConfig->outputSize;
+ } else if (!rawStreams_.empty()) {
+ /* Use the RAW stream format and size. */
+ BayerFormat bayerFormat = BayerFormat::fromPixelFormat(rawStreams_[0].cfg->pixelFormat);
+ bitDepth = bayerFormat.bitDepth;
+ sensorSize = rawStreams_[0].cfg->size;
+ } else {
+ bitDepth = defaultRawBitDepth;
+ sensorSize = outStreams_[0].cfg->size;
+ }
+
+ sensorFormat_ = data_->findBestFormat(sensorSize, bitDepth);
+
+ /*
+ * If a sensor configuration has been requested, it should apply
+ * without modifications.
+ */
+ if (sensorConfig) {
+ BayerFormat bayer = BayerFormat::fromMbusCode(sensorFormat_.code);
+
+ if (bayer.bitDepth != sensorConfig->bitDepth ||
+ sensorFormat_.size != sensorConfig->outputSize) {
+ LOG(RPI, Error) << "Invalid sensor configuration: "
+ << "bitDepth/size mismatch";
+ return Invalid;
+ }
+ }
+
+ /* Start with some initial generic RAW stream adjustments. */
+ for (auto &raw : rawStreams_) {
+ StreamConfiguration *rawStream = raw.cfg;
+
+ /*
+ * Some sensors change their Bayer order when they are
+ * h-flipped or v-flipped, according to the transform. Adjust
+ * the RAW stream to match the computed sensor format by
+ * applying the sensor Bayer order resulting from the transform
+ * to the user request.
+ */
+
+ BayerFormat cfgBayer = BayerFormat::fromPixelFormat(rawStream->pixelFormat);
+ cfgBayer.order = data_->sensor_->bayerOrder(combinedTransform_);
+
+ if (rawStream->pixelFormat != cfgBayer.toPixelFormat()) {
+ rawStream->pixelFormat = cfgBayer.toPixelFormat();
+ status = Adjusted;
+ }
+ }
+
+ /* Do any platform specific fixups. */
+ Status st = data_->platformValidate(this);
+ if (st == Invalid)
+ return Invalid;
+ else if (st == Adjusted)
+ status = Adjusted;
+
+ /* Further fixups on the RAW streams. */
+ for (auto &raw : rawStreams_) {
+ int ret = raw.dev->tryFormat(&raw.format);
+ if (ret)
+ return Invalid;
+
+ if (RPi::PipelineHandlerBase::updateStreamConfig(raw.cfg, raw.format))
+ status = Adjusted;
+ }
+
+ /* Further fixups on the ISP output streams. */
+ for (auto &out : outStreams_) {
+
+ /*
+ * We want to send the associated YCbCr info through to the driver.
+ *
+ * But for RGB streams, the YCbCr info gets overwritten on the way back
+ * so we must check against what the stream cfg says, not what we actually
+ * requested (which carefully included the YCbCr info)!
+ */
+ out.format.colorSpace = yuvColorSpace_;
+
+ LOG(RPI, Debug)
+ << "Try color space " << ColorSpace::toString(out.cfg->colorSpace);
+
+ int ret = out.dev->tryFormat(&out.format);
+ if (ret)
+ return Invalid;
+
+ if (RPi::PipelineHandlerBase::updateStreamConfig(out.cfg, out.format))
+ status = Adjusted;
+ }
+
+ return status;
+}
+
+bool PipelineHandlerBase::isRgb(const PixelFormat &pixFmt)
+{
+ const PixelFormatInfo &info = PixelFormatInfo::info(pixFmt);
+ return info.colourEncoding == PixelFormatInfo::ColourEncodingRGB;
+}
+
+bool PipelineHandlerBase::isYuv(const PixelFormat &pixFmt)
+{
+ /* The code below would return true for raw mono streams, so weed those out first. */
+ if (PipelineHandlerBase::isRaw(pixFmt))
+ return false;
+
+ const PixelFormatInfo &info = PixelFormatInfo::info(pixFmt);
+ return info.colourEncoding == PixelFormatInfo::ColourEncodingYUV;
+}
+
+bool PipelineHandlerBase::isRaw(const PixelFormat &pixFmt)
+{
+ /* This test works for both Bayer and raw mono formats. */
+ return BayerFormat::fromPixelFormat(pixFmt).isValid();
+}
+
+/*
+ * Adjust a StreamConfiguration's fields to match a video device format.
+ * Returns true if the StreamConfiguration has been adjusted.
+ */
+bool PipelineHandlerBase::updateStreamConfig(StreamConfiguration *stream,
+ const V4L2DeviceFormat &format)
+{
+ const PixelFormat &pixFormat = format.fourcc.toPixelFormat();
+ bool adjusted = false;
+
+ if (stream->pixelFormat != pixFormat || stream->size != format.size) {
+ stream->pixelFormat = pixFormat;
+ stream->size = format.size;
+ adjusted = true;
+ }
+
+ if (stream->colorSpace != format.colorSpace) {
+ stream->colorSpace = format.colorSpace;
+ adjusted = true;
+ LOG(RPI, Debug)
+ << "Color space changed from "
+ << ColorSpace::toString(stream->colorSpace) << " to "
+ << ColorSpace::toString(format.colorSpace);
+ }
+
+ stream->stride = format.planes[0].bpl;
+ stream->frameSize = format.planes[0].size;
+
+ return adjusted;
+}
+
+/*
+ * Populate and return a video device format using a StreamConfiguration.
+ */
+V4L2DeviceFormat PipelineHandlerBase::toV4L2DeviceFormat(const V4L2VideoDevice *dev,
+ const StreamConfiguration *stream)
+{
+ V4L2DeviceFormat deviceFormat;
+
+ const PixelFormatInfo &info = PixelFormatInfo::info(stream->pixelFormat);
+ deviceFormat.planesCount = info.numPlanes();
+ deviceFormat.fourcc = dev->toV4L2PixelFormat(stream->pixelFormat);
+ deviceFormat.size = stream->size;
+ deviceFormat.planes[0].bpl = stream->stride;
+ deviceFormat.colorSpace = stream->colorSpace;
+
+ return deviceFormat;
+}
+
+V4L2DeviceFormat PipelineHandlerBase::toV4L2DeviceFormat(const V4L2VideoDevice *dev,
+ const V4L2SubdeviceFormat &format,
+ BayerFormat::Packing packingReq)
+{
+ unsigned int code = format.code;
+ const PixelFormat pix = mbusCodeToPixelFormat(code, packingReq);
+ V4L2DeviceFormat deviceFormat;
+
+ deviceFormat.fourcc = dev->toV4L2PixelFormat(pix);
+ deviceFormat.size = format.size;
+ deviceFormat.colorSpace = format.colorSpace;
+ return deviceFormat;
+}
+
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerBase::generateConfiguration(Camera *camera, Span<const StreamRole> roles)
+{
+ CameraData *data = cameraData(camera);
+ std::unique_ptr<CameraConfiguration> config =
+ std::make_unique<RPiCameraConfiguration>(data);
+ V4L2SubdeviceFormat sensorFormat;
+ unsigned int bufferCount;
+ PixelFormat pixelFormat;
+ V4L2VideoDevice::Formats fmts;
+ Size size;
+ std::optional<ColorSpace> colorSpace;
+
+ if (roles.empty())
+ return config;
+
+ Size sensorSize = data->sensor_->resolution();
+ for (const StreamRole role : roles) {
+ switch (role) {
+ case StreamRole::Raw:
+ size = sensorSize;
+ sensorFormat = data->findBestFormat(size, defaultRawBitDepth);
+ pixelFormat = mbusCodeToPixelFormat(sensorFormat.code,
+ BayerFormat::Packing::CSI2);
+ ASSERT(pixelFormat.isValid());
+ colorSpace = ColorSpace::Raw;
+ bufferCount = 2;
+ break;
+
+ case StreamRole::StillCapture:
+ fmts = data->ispFormats();
+ pixelFormat = formats::YUV420;
+ /*
+ * Still image codecs usually expect the sYCC color space.
+ * Even RGB codecs will be fine as the RGB we get with the
+ * sYCC color space is the same as sRGB.
+ */
+ colorSpace = ColorSpace::Sycc;
+ /* Return the largest sensor resolution. */
+ size = sensorSize;
+ bufferCount = 1;
+ break;
+
+ case StreamRole::VideoRecording:
+ /*
+ * The colour denoise algorithm requires the analysis
+ * image, produced by the second ISP output, to be in
+ * YUV420 format. Select this format as the default, to
+ * maximize chances that it will be picked by
+ * applications and enable usage of the colour denoise
+ * algorithm.
+ */
+ fmts = data->ispFormats();
+ pixelFormat = formats::YUV420;
+ /*
+ * Choose a color space appropriate for video recording.
+ * Rec.709 will be a good default for HD resolutions.
+ */
+ colorSpace = ColorSpace::Rec709;
+ size = { 1920, 1080 };
+ bufferCount = 4;
+ break;
+
+ case StreamRole::Viewfinder:
+ fmts = data->ispFormats();
+ pixelFormat = formats::XRGB8888;
+ colorSpace = ColorSpace::Sycc;
+ size = { 800, 600 };
+ bufferCount = 4;
+ break;
+
+ default:
+ LOG(RPI, Error) << "Requested stream role not supported: "
+ << role;
+ return nullptr;
+ }
+
+ std::map<PixelFormat, std::vector<SizeRange>> deviceFormats;
+ if (role == StreamRole::Raw) {
+ /* Translate the MBUS codes to a PixelFormat. */
+ for (const auto &format : data->sensorFormats_) {
+ PixelFormat pf = mbusCodeToPixelFormat(format.first,
+ BayerFormat::Packing::CSI2);
+ if (pf.isValid())
+ deviceFormats.emplace(std::piecewise_construct, std::forward_as_tuple(pf),
+ std::forward_as_tuple(format.second.begin(), format.second.end()));
+ }
+ } else {
+ /*
+ * Translate the V4L2PixelFormat to PixelFormat. Note that we
+ * limit the recommended largest ISP output size to match the
+ * sensor resolution.
+ */
+ for (const auto &format : fmts) {
+ PixelFormat pf = format.first.toPixelFormat();
+ /*
+ * Some V4L2 formats translate to the same pixel format (e.g. YU12, YM12
+ * both give YUV420). We must avoid duplicating the range in this case.
+ */
+ if (pf.isValid() && deviceFormats.find(pf) == deviceFormats.end()) {
+ const SizeRange &ispSizes = format.second[0];
+ deviceFormats[pf].emplace_back(ispSizes.min, sensorSize,
+ ispSizes.hStep, ispSizes.vStep);
+ }
+ }
+ }
+
+ /* Add the stream format based on the device node used for the use case. */
+ StreamFormats formats(deviceFormats);
+ StreamConfiguration cfg(formats);
+ cfg.size = size;
+ cfg.pixelFormat = pixelFormat;
+ cfg.colorSpace = colorSpace;
+ cfg.bufferCount = bufferCount;
+ config->addConfiguration(cfg);
+ }
+
+ config->validate();
+
+ return config;
+}
+
+int PipelineHandlerBase::configure(Camera *camera, CameraConfiguration *config)
+{
+ CameraData *data = cameraData(camera);
+ int ret;
+
+ /* Start by freeing all buffers and reset the stream states. */
+ data->freeBuffers();
+ for (auto const stream : data->streams_)
+ stream->clearFlags(StreamFlag::External);
+
+ /*
+ * Apply the format on the sensor with any cached transform.
+ *
+ * If the application has provided a sensor configuration apply it
+ * instead of just applying a format.
+ */
+ RPiCameraConfiguration *rpiConfig = static_cast<RPiCameraConfiguration *>(config);
+ V4L2SubdeviceFormat *sensorFormat = &rpiConfig->sensorFormat_;
+
+ if (rpiConfig->sensorConfig) {
+ ret = data->sensor_->applyConfiguration(*rpiConfig->sensorConfig,
+ rpiConfig->combinedTransform_,
+ sensorFormat);
+ } else {
+ ret = data->sensor_->setFormat(sensorFormat,
+ rpiConfig->combinedTransform_);
+ }
+ if (ret)
+ return ret;
+
+ /*
+ * Platform specific internal stream configuration. This also assigns
+ * external streams which get configured below.
+ */
+ ret = data->platformConfigure(rpiConfig);
+ if (ret)
+ return ret;
+
+ ipa::RPi::ConfigResult result;
+ ret = data->configureIPA(config, &result);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to configure the IPA: " << ret;
+ return ret;
+ }
+
+ /*
+ * Set the scaler crop to the value we are using (scaled to native sensor
+ * coordinates).
+ */
+ data->scalerCrop_ = data->scaleIspCrop(data->ispCrop_);
+
+ /*
+ * Update the ScalerCropMaximum to the correct value for this camera mode.
+ * For us, it's the same as the "analogue crop".
+ *
+ * \todo Make this property the ScalerCrop maximum value when dynamic
+ * controls are available and set it at validate() time
+ */
+ data->properties_.set(properties::ScalerCropMaximum, data->sensorInfo_.analogCrop);
+
+ /* Store the mode sensitivity for the application. */
+ data->properties_.set(properties::SensorSensitivity, result.modeSensitivity);
+
+ /* Update the controls that the Raspberry Pi IPA can handle. */
+ ControlInfoMap::Map ctrlMap;
+ for (auto const &c : result.controlInfo)
+ ctrlMap.emplace(c.first, c.second);
+
+ /* Add the ScalerCrop control limits based on the current mode. */
+ Rectangle ispMinCrop = data->scaleIspCrop(Rectangle(data->ispMinCropSize_));
+ ctrlMap[&controls::ScalerCrop] = ControlInfo(ispMinCrop, data->sensorInfo_.analogCrop, data->scalerCrop_);
+
+ data->controlInfo_ = ControlInfoMap(std::move(ctrlMap), result.controlInfo.idmap());
+
+ /* Setup the Video Mux/Bridge entities. */
+ for (auto &[device, link] : data->bridgeDevices_) {
+ /*
+ * Start by disabling all the sink pad links on the devices in the
+ * cascade, with the exception of the link connecting the device.
+ */
+ for (const MediaPad *p : device->entity()->pads()) {
+ if (!(p->flags() & MEDIA_PAD_FL_SINK))
+ continue;
+
+ for (MediaLink *l : p->links()) {
+ if (l != link)
+ l->setEnabled(false);
+ }
+ }
+
+ /*
+ * Next, enable the entity -> entity links, and setup the pad format.
+ *
+ * \todo Some bridge devices may chainge the media bus code, so we
+ * ought to read the source pad format and propagate it to the sink pad.
+ */
+ link->setEnabled(true);
+ const MediaPad *sinkPad = link->sink();
+ ret = device->setFormat(sinkPad->index(), sensorFormat);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to set format on " << device->entity()->name()
+ << " pad " << sinkPad->index()
+ << " with format " << *sensorFormat
+ << ": " << ret;
+ return ret;
+ }
+
+ LOG(RPI, Debug) << "Configured media link on device " << device->entity()->name()
+ << " on pad " << sinkPad->index();
+ }
+
+ return 0;
+}
+
+int PipelineHandlerBase::exportFrameBuffers([[maybe_unused]] Camera *camera, libcamera::Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ RPi::Stream *s = static_cast<RPi::Stream *>(stream);
+ unsigned int count = stream->configuration().bufferCount;
+ int ret = s->dev()->exportBuffers(count, buffers);
+
+ s->setExportedBuffers(buffers);
+
+ return ret;
+}
+
+int PipelineHandlerBase::start(Camera *camera, const ControlList *controls)
+{
+ CameraData *data = cameraData(camera);
+ int ret;
+
+ /* Check if a ScalerCrop control was specified. */
+ if (controls)
+ data->applyScalerCrop(*controls);
+
+ /* Start the IPA. */
+ ipa::RPi::StartResult result;
+ data->ipa_->start(controls ? *controls : ControlList{ controls::controls },
+ &result);
+
+ /* Apply any gain/exposure settings that the IPA may have passed back. */
+ if (!result.controls.empty())
+ data->setSensorControls(result.controls);
+
+ /* Configure the number of dropped frames required on startup. */
+ data->dropFrameCount_ = data->config_.disableStartupFrameDrops
+ ? 0 : result.dropFrameCount;
+
+ for (auto const stream : data->streams_)
+ stream->resetBuffers();
+
+ if (!data->buffersAllocated_) {
+ /* Allocate buffers for internal pipeline usage. */
+ ret = prepareBuffers(camera);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to allocate buffers";
+ data->freeBuffers();
+ stop(camera);
+ return ret;
+ }
+ data->buffersAllocated_ = true;
+ }
+
+ /* We need to set the dropFrameCount_ before queueing buffers. */
+ ret = queueAllBuffers(camera);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to queue buffers";
+ stop(camera);
+ return ret;
+ }
+
+ /*
+ * Reset the delayed controls with the gain and exposure values set by
+ * the IPA.
+ */
+ data->delayedCtrls_->reset(0);
+ data->state_ = CameraData::State::Idle;
+
+ /* Enable SOF event generation. */
+ data->frontendDevice()->setFrameStartEnabled(true);
+
+ data->platformStart();
+
+ /* Start all streams. */
+ for (auto const stream : data->streams_) {
+ ret = stream->dev()->streamOn();
+ if (ret) {
+ stop(camera);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+void PipelineHandlerBase::stopDevice(Camera *camera)
+{
+ CameraData *data = cameraData(camera);
+
+ data->state_ = CameraData::State::Stopped;
+ data->platformStop();
+
+ for (auto const stream : data->streams_)
+ stream->dev()->streamOff();
+
+ /* Disable SOF event generation. */
+ data->frontendDevice()->setFrameStartEnabled(false);
+
+ data->clearIncompleteRequests();
+
+ /* Stop the IPA. */
+ data->ipa_->stop();
+}
+
+void PipelineHandlerBase::releaseDevice(Camera *camera)
+{
+ CameraData *data = cameraData(camera);
+ data->freeBuffers();
+}
+
+int PipelineHandlerBase::queueRequestDevice(Camera *camera, Request *request)
+{
+ CameraData *data = cameraData(camera);
+
+ if (!data->isRunning())
+ return -EINVAL;
+
+ LOG(RPI, Debug) << "queueRequestDevice: New request sequence: "
+ << request->sequence();
+
+ /* Push all buffers supplied in the Request to the respective streams. */
+ for (auto stream : data->streams_) {
+ if (!(stream->getFlags() & StreamFlag::External))
+ continue;
+
+ FrameBuffer *buffer = request->findBuffer(stream);
+ if (buffer && !stream->getBufferId(buffer)) {
+ /*
+ * This buffer is not recognised, so it must have been allocated
+ * outside the v4l2 device. Store it in the stream buffer list
+ * so we can track it.
+ */
+ stream->setExportedBuffer(buffer);
+ }
+
+ /*
+ * If no buffer is provided by the request for this stream, we
+ * queue a nullptr to the stream to signify that it must use an
+ * internally allocated buffer for this capture request. This
+ * buffer will not be given back to the application, but is used
+ * to support the internal pipeline flow.
+ *
+ * The below queueBuffer() call will do nothing if there are not
+ * enough internal buffers allocated, but this will be handled by
+ * queuing the request for buffers in the RPiStream object.
+ */
+ int ret = stream->queueBuffer(buffer);
+ if (ret)
+ return ret;
+ }
+
+ /* Push the request to the back of the queue. */
+ data->requestQueue_.push(request);
+ data->handleState();
+
+ return 0;
+}
+
+int PipelineHandlerBase::registerCamera(std::unique_ptr<RPi::CameraData> &cameraData,
+ MediaDevice *frontend, const std::string &frontendName,
+ MediaDevice *backend, MediaEntity *sensorEntity)
+{
+ CameraData *data = cameraData.get();
+ int ret;
+
+ data->sensor_ = std::make_unique<CameraSensor>(sensorEntity);
+ if (!data->sensor_)
+ return -EINVAL;
+
+ if (data->sensor_->init())
+ return -EINVAL;
+
+ /* Populate the map of sensor supported formats and sizes. */
+ for (auto const mbusCode : data->sensor_->mbusCodes())
+ data->sensorFormats_.emplace(mbusCode,
+ data->sensor_->sizes(mbusCode));
+
+ /*
+ * Enumerate all the Video Mux/Bridge devices across the sensor -> frontend
+ * chain. There may be a cascade of devices in this chain!
+ */
+ MediaLink *link = sensorEntity->getPadByIndex(0)->links()[0];
+ data->enumerateVideoDevices(link, frontendName);
+
+ ipa::RPi::InitResult result;
+ if (data->loadIPA(&result)) {
+ LOG(RPI, Error) << "Failed to load a suitable IPA library";
+ return -EINVAL;
+ }
+
+ /*
+ * Setup our delayed control writer with the sensor default
+ * gain and exposure delays. Mark VBLANK for priority write.
+ */
+ std::unordered_map<uint32_t, RPi::DelayedControls::ControlParams> params = {
+ { V4L2_CID_ANALOGUE_GAIN, { result.sensorConfig.gainDelay, false } },
+ { V4L2_CID_EXPOSURE, { result.sensorConfig.exposureDelay, false } },
+ { V4L2_CID_HBLANK, { result.sensorConfig.hblankDelay, false } },
+ { V4L2_CID_VBLANK, { result.sensorConfig.vblankDelay, true } }
+ };
+ data->delayedCtrls_ = std::make_unique<RPi::DelayedControls>(data->sensor_->device(), params);
+ data->sensorMetadata_ = result.sensorConfig.sensorMetadata;
+
+ /* Register initial controls that the Raspberry Pi IPA can handle. */
+ data->controlInfo_ = std::move(result.controlInfo);
+
+ /* Initialize the camera properties. */
+ data->properties_ = data->sensor_->properties();
+
+ /*
+ * The V4L2_CID_NOTIFY_GAINS control, if present, is used to inform the
+ * sensor of the colour gains. It is defined to be a linear gain where
+ * the default value represents a gain of exactly one.
+ */
+ auto it = data->sensor_->controls().find(V4L2_CID_NOTIFY_GAINS);
+ if (it != data->sensor_->controls().end())
+ data->notifyGainsUnity_ = it->second.def().get<int32_t>();
+
+ /*
+ * Set a default value for the ScalerCropMaximum property to show
+ * that we support its use, however, initialise it to zero because
+ * it's not meaningful until a camera mode has been chosen.
+ */
+ data->properties_.set(properties::ScalerCropMaximum, Rectangle{});
+
+ ret = platformRegister(cameraData, frontend, backend);
+ if (ret)
+ return ret;
+
+ ret = data->loadPipelineConfiguration();
+ if (ret) {
+ LOG(RPI, Error) << "Unable to load pipeline configuration";
+ return ret;
+ }
+
+ /* Setup the general IPA signal handlers. */
+ data->frontendDevice()->dequeueTimeout.connect(data, &RPi::CameraData::cameraTimeout);
+ data->frontendDevice()->frameStart.connect(data, &RPi::CameraData::frameStarted);
+ data->ipa_->setDelayedControls.connect(data, &CameraData::setDelayedControls);
+ data->ipa_->setLensControls.connect(data, &CameraData::setLensControls);
+ data->ipa_->metadataReady.connect(data, &CameraData::metadataReady);
+
+ return 0;
+}
+
+void PipelineHandlerBase::mapBuffers(Camera *camera, const BufferMap &buffers, unsigned int mask)
+{
+ CameraData *data = cameraData(camera);
+ std::vector<IPABuffer> bufferIds;
+ /*
+ * Link the FrameBuffers with the id (key value) in the map stored in
+ * the RPi stream object - along with an identifier mask.
+ *
+ * This will allow us to identify buffers passed between the pipeline
+ * handler and the IPA.
+ */
+ for (auto const &it : buffers) {
+ bufferIds.push_back(IPABuffer(mask | it.first,
+ it.second.buffer->planes()));
+ data->bufferIds_.insert(mask | it.first);
+ }
+
+ data->ipa_->mapBuffers(bufferIds);
+}
+
+int PipelineHandlerBase::queueAllBuffers(Camera *camera)
+{
+ CameraData *data = cameraData(camera);
+ int ret;
+
+ for (auto const stream : data->streams_) {
+ if (!(stream->getFlags() & StreamFlag::External)) {
+ ret = stream->queueAllBuffers();
+ if (ret < 0)
+ return ret;
+ } else {
+ /*
+ * For external streams, we must queue up a set of internal
+ * buffers to handle the number of drop frames requested by
+ * the IPA. This is done by passing nullptr in queueBuffer().
+ *
+ * The below queueBuffer() call will do nothing if there
+ * are not enough internal buffers allocated, but this will
+ * be handled by queuing the request for buffers in the
+ * RPiStream object.
+ */
+ unsigned int i;
+ for (i = 0; i < data->dropFrameCount_; i++) {
+ ret = stream->queueBuffer(nullptr);
+ if (ret)
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+double CameraData::scoreFormat(double desired, double actual) const
+{
+ double score = desired - actual;
+ /* Smaller desired dimensions are preferred. */
+ if (score < 0.0)
+ score = (-score) / 8;
+ /* Penalise non-exact matches. */
+ if (actual != desired)
+ score *= 2;
+
+ return score;
+}
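+
+/*
+ * A rough worked illustration of the scoring above (the numbers are examples
+ * only): for a requested width of 1920, an exact 1920 mode scores 0; a
+ * smaller 1280 mode scores (1920 - 1280) * 2 = 1280; a larger 3280 mode
+ * scores ((3280 - 1920) / 8) * 2 = 340. Modes larger than the request are
+ * therefore penalised far less than smaller ones.
+ */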
+
+V4L2SubdeviceFormat CameraData::findBestFormat(const Size &req, unsigned int bitDepth) const
+{
+ double bestScore = std::numeric_limits<double>::max(), score;
+ V4L2SubdeviceFormat bestFormat;
+ bestFormat.colorSpace = ColorSpace::Raw;
+
+ constexpr float penaltyAr = 1500.0;
+ constexpr float penaltyBitDepth = 500.0;
+
+ /* Calculate the closest/best mode from the user requested size. */
+ for (const auto &iter : sensorFormats_) {
+ const unsigned int mbusCode = iter.first;
+ const PixelFormat format = mbusCodeToPixelFormat(mbusCode,
+ BayerFormat::Packing::None);
+ const PixelFormatInfo &info = PixelFormatInfo::info(format);
+
+ for (const Size &size : iter.second) {
+ double reqAr = static_cast<double>(req.width) / req.height;
+ double fmtAr = static_cast<double>(size.width) / size.height;
+
+ /* Score the dimensions for closeness. */
+ score = scoreFormat(req.width, size.width);
+ score += scoreFormat(req.height, size.height);
+ score += penaltyAr * scoreFormat(reqAr, fmtAr);
+
+ /* Add any penalties... this is not an exact science! */
+ score += utils::abs_diff(info.bitsPerPixel, bitDepth) * penaltyBitDepth;
+
+ if (score <= bestScore) {
+ bestScore = score;
+ bestFormat.code = mbusCode;
+ bestFormat.size = size;
+ }
+
+ LOG(RPI, Debug) << "Format: " << size
+ << " fmt " << format
+ << " Score: " << score
+ << " (best " << bestScore << ")";
+ }
+ }
+
+ return bestFormat;
+}
+
+void CameraData::freeBuffers()
+{
+ if (ipa_) {
+ /*
+ * Copy the buffer ids from the unordered_set to a vector to
+ * pass to the IPA.
+ */
+ std::vector<unsigned int> bufferIds(bufferIds_.begin(),
+ bufferIds_.end());
+ ipa_->unmapBuffers(bufferIds);
+ bufferIds_.clear();
+ }
+
+ for (auto const stream : streams_)
+ stream->releaseBuffers();
+
+ platformFreeBuffers();
+
+ buffersAllocated_ = false;
+}
+
+/*
+ * enumerateVideoDevices() iterates over the Media Controller topology, starting
+ * at the sensor and finishing at the frontend. For each sensor, CameraData stores
+ * a unique list of any intermediate video mux or bridge devices connected in a
+ * cascade, together with the entity to entity link.
+ *
+ * Entity pad configuration and link enabling happen at the end of configure().
+ * We first disable all pad links on each entity device in the chain, and then
+ * selectively enable the specific links that connect the sensor to the
+ * frontend across all intermediate muxes and bridges.
+ *
+ * In the cascaded topology below, if Sensor1 is used, the Mux2 -> Mux1 link
+ * will be disabled, and Sensor1 -> Mux1 -> Frontend links enabled. Alternatively,
+ * if Sensor3 is used, the Sensor2 -> Mux2 and Sensor1 -> Mux1 links are disabled,
+ * and Sensor3 -> Mux2 -> Mux1 -> Frontend links are enabled. All other links will
+ * remain unchanged.
+ *
+ * +----------+
+ * | FE |
+ * +-----^----+
+ * |
+ * +---+---+
+ * | Mux1 |<------+
+ * +--^---- |
+ * | |
+ * +-----+---+ +---+---+
+ * | Sensor1 | | Mux2 |<--+
+ * +---------+ +-^-----+ |
+ * | |
+ * +-------+-+ +---+-----+
+ * | Sensor2 | | Sensor3 |
+ * +---------+ +---------+
+ */
+void CameraData::enumerateVideoDevices(MediaLink *link, const std::string &frontend)
+{
+ const MediaPad *sinkPad = link->sink();
+ const MediaEntity *entity = sinkPad->entity();
+ bool frontendFound = false;
+
+ /* We only deal with Video Mux and Bridge devices in cascade. */
+ if (entity->function() != MEDIA_ENT_F_VID_MUX &&
+ entity->function() != MEDIA_ENT_F_VID_IF_BRIDGE)
+ return;
+
+ /* Find the source pad for this Video Mux or Bridge device. */
+ const MediaPad *sourcePad = nullptr;
+ for (const MediaPad *pad : entity->pads()) {
+ if (pad->flags() & MEDIA_PAD_FL_SOURCE) {
+ /*
+ * We can only deal with devices that have a single source
+ * pad. If this device has multiple source pads, ignore it
+ * and this branch in the cascade.
+ */
+ if (sourcePad)
+ return;
+
+ sourcePad = pad;
+ }
+ }
+
+ LOG(RPI, Debug) << "Found video mux device " << entity->name()
+ << " linked to sink pad " << sinkPad->index();
+
+ bridgeDevices_.emplace_back(std::make_unique<V4L2Subdevice>(entity), link);
+ bridgeDevices_.back().first->open();
+
+ /*
+ * Iterate through all the sink pad links down the cascade to find any
+ * other Video Mux and Bridge devices.
+ */
+ for (MediaLink *l : sourcePad->links()) {
+ enumerateVideoDevices(l, frontend);
+ /* Once we reach the Frontend entity, we are done. */
+ if (l->sink()->entity()->name() == frontend) {
+ frontendFound = true;
+ break;
+ }
+ }
+
+ /* This identifies the end of our entity enumeration recursion. */
+ if (link->source()->entity()->function() == MEDIA_ENT_F_CAM_SENSOR) {
+ /*
+ * If the frontend is not at the end of this cascade, we cannot
+ * configure this topology automatically, so remove all entity
+ * references.
+ */
+ if (!frontendFound) {
+ LOG(RPI, Warning) << "Cannot automatically configure this MC topology!";
+ bridgeDevices_.clear();
+ }
+ }
+}
+
+int CameraData::loadPipelineConfiguration()
+{
+ config_ = {
+ .disableStartupFrameDrops = false,
+ .cameraTimeoutValue = 0,
+ };
+
+ /* Initial configuration of the platform, in case no config file is present */
+ platformPipelineConfigure({});
+
+ char const *configFromEnv = utils::secure_getenv("LIBCAMERA_RPI_CONFIG_FILE");
+ if (!configFromEnv || *configFromEnv == '\0')
+ return 0;
+
+ std::string filename = std::string(configFromEnv);
+ File file(filename);
+
+ if (!file.open(File::OpenModeFlag::ReadOnly)) {
+ LOG(RPI, Warning) << "Failed to open configuration file '" << filename << "'"
+ << ", using defaults";
+ return 0;
+ }
+
+ LOG(RPI, Info) << "Using configuration file '" << filename << "'";
+
+ std::unique_ptr<YamlObject> root = YamlParser::parse(file);
+ if (!root) {
+ LOG(RPI, Warning) << "Failed to parse configuration file, using defaults";
+ return 0;
+ }
+
+ std::optional<double> ver = (*root)["version"].get<double>();
+ if (!ver || *ver != 1.0) {
+ LOG(RPI, Warning) << "Unexpected configuration file version reported: "
+ << *ver;
+ return 0;
+ }
+
+ const YamlObject &phConfig = (*root)["pipeline_handler"];
+
+ config_.disableStartupFrameDrops =
+ phConfig["disable_startup_frame_drops"].get<bool>(config_.disableStartupFrameDrops);
+
+ config_.cameraTimeoutValue =
+ phConfig["camera_timeout_value_ms"].get<unsigned int>(config_.cameraTimeoutValue);
+
+ if (config_.cameraTimeoutValue) {
+ /* Disable the IPA signal to control timeout and set the user requested value. */
+ ipa_->setCameraTimeout.disconnect();
+ frontendDevice()->setDequeueTimeout(config_.cameraTimeoutValue * 1ms);
+ }
+
+ return platformPipelineConfigure(root);
+}
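+
+/*
+ * An illustrative configuration file accepted by the loader above (the key
+ * names come from the code, the values shown are examples only):
+ *
+ *   version: 1.0
+ *   pipeline_handler:
+ *       disable_startup_frame_drops: false
+ *       camera_timeout_value_ms: 0
+ */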
+
+int CameraData::loadIPA(ipa::RPi::InitResult *result)
+{
+ int ret;
+
+ ipa_ = IPAManager::createIPA<ipa::RPi::IPAProxyRPi>(pipe(), 1, 1);
+
+ if (!ipa_)
+ return -ENOENT;
+
+ /*
+ * The configuration (tuning file) is made from the sensor name unless
+ * the environment variable overrides it.
+ */
+ std::string configurationFile;
+ char const *configFromEnv = utils::secure_getenv("LIBCAMERA_RPI_TUNING_FILE");
+ if (!configFromEnv || *configFromEnv == '\0') {
+ std::string model = sensor_->model();
+ if (isMonoSensor(sensor_))
+ model += "_mono";
+ configurationFile = ipa_->configurationFile(model + ".json");
+ } else {
+ configurationFile = std::string(configFromEnv);
+ }
+
+ IPASettings settings(configurationFile, sensor_->model());
+ ipa::RPi::InitParams params;
+
+ ret = sensor_->sensorInfo(&params.sensorInfo);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to retrieve camera sensor info";
+ return ret;
+ }
+
+ params.lensPresent = !!sensor_->focusLens();
+ ret = platformInitIpa(params);
+ if (ret)
+ return ret;
+
+ return ipa_->init(settings, params, result);
+}
+
+int CameraData::configureIPA(const CameraConfiguration *config, ipa::RPi::ConfigResult *result)
+{
+ ipa::RPi::ConfigParams params;
+ int ret;
+
+ params.sensorControls = sensor_->controls();
+ if (sensor_->focusLens())
+ params.lensControls = sensor_->focusLens()->controls();
+
+ ret = platformConfigureIpa(params);
+ if (ret)
+ return ret;
+
+ /* We store the IPACameraSensorInfo for digital zoom calculations. */
+ ret = sensor_->sensorInfo(&sensorInfo_);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to retrieve camera sensor info";
+ return ret;
+ }
+
+ /* Always send the user transform to the IPA. */
+ Transform transform = config->orientation / Orientation::Rotate0;
+ params.transform = static_cast<unsigned int>(transform);
+
+ /* Ready the IPA - it must know about the sensor resolution. */
+ ret = ipa_->configure(sensorInfo_, params, result);
+ if (ret < 0) {
+ LOG(RPI, Error) << "IPA configuration failed!";
+ return -EPIPE;
+ }
+
+ if (!result->sensorControls.empty())
+ setSensorControls(result->sensorControls);
+ if (!result->lensControls.empty())
+ setLensControls(result->lensControls);
+
+ return 0;
+}
+
+void CameraData::metadataReady(const ControlList &metadata)
+{
+ if (!isRunning())
+ return;
+
+	/* Add to the Request metadata buffer what the IPA has provided. */
+ Request *request = requestQueue_.front();
+ request->metadata().merge(metadata);
+
+ /*
+ * Inform the sensor of the latest colour gains if it has the
+ * V4L2_CID_NOTIFY_GAINS control (which means notifyGainsUnity_ is set).
+ */
+ const auto &colourGains = metadata.get(libcamera::controls::ColourGains);
+ if (notifyGainsUnity_ && colourGains) {
+ /* The control wants linear gains in the order B, Gb, Gr, R. */
+ ControlList ctrls(sensor_->controls());
+ std::array<int32_t, 4> gains{
+ static_cast<int32_t>((*colourGains)[1] * *notifyGainsUnity_),
+ *notifyGainsUnity_,
+ *notifyGainsUnity_,
+ static_cast<int32_t>((*colourGains)[0] * *notifyGainsUnity_)
+ };
+ ctrls.set(V4L2_CID_NOTIFY_GAINS, Span<const int32_t>{ gains });
+
+ sensor_->setControls(&ctrls);
+ }
+}
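As a worked example of the conversion above, assume (hypothetically) notifyGainsUnity_ = 128 and ColourGains = { 1.5, 2.0 }, i.e. a red gain of 1.5 and a blue gain of 2.0:

/*
 *   B  = colourGains[1] * 128 = 2.0 * 128 = 256
 *   Gb = 128
 *   Gr = 128
 *   R  = colourGains[0] * 128 = 1.5 * 128 = 192
 *
 * so V4L2_CID_NOTIFY_GAINS receives { 256, 128, 128, 192 }.
 */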
+
+void CameraData::setDelayedControls(const ControlList &controls, uint32_t delayContext)
+{
+ if (!delayedCtrls_->push(controls, delayContext))
+ LOG(RPI, Error) << "V4L2 DelayedControl set failed";
+}
+
+void CameraData::setLensControls(const ControlList &controls)
+{
+ CameraLens *lens = sensor_->focusLens();
+
+ if (lens && controls.contains(V4L2_CID_FOCUS_ABSOLUTE)) {
+ ControlValue const &focusValue = controls.get(V4L2_CID_FOCUS_ABSOLUTE);
+ lens->setFocusPosition(focusValue.get<int32_t>());
+ }
+}
+
+void CameraData::setSensorControls(ControlList &controls)
+{
+ /*
+ * We need to ensure that if both VBLANK and EXPOSURE are present, the
+	 * former must be written ahead of, and separately from, EXPOSURE to avoid
+ * V4L2 rejecting the latter. This is identical to what DelayedControls
+ * does with the priority write flag.
+ *
+ * As a consequence of the below logic, VBLANK gets set twice, and we
+ * rely on the v4l2 framework to not pass the second control set to the
+ * driver as the actual control value has not changed.
+ */
+ if (controls.contains(V4L2_CID_EXPOSURE) && controls.contains(V4L2_CID_VBLANK)) {
+ ControlList vblank_ctrl;
+
+ vblank_ctrl.set(V4L2_CID_VBLANK, controls.get(V4L2_CID_VBLANK));
+ sensor_->setControls(&vblank_ctrl);
+ }
+
+ sensor_->setControls(&controls);
+}
+
+Rectangle CameraData::scaleIspCrop(const Rectangle &ispCrop) const
+{
+ /*
+ * Scale a crop rectangle defined in the ISP's coordinates into native sensor
+ * coordinates.
+ */
+ Rectangle nativeCrop = ispCrop.scaledBy(sensorInfo_.analogCrop.size(),
+ sensorInfo_.outputSize);
+ nativeCrop.translateBy(sensorInfo_.analogCrop.topLeft());
+ return nativeCrop;
+}
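A short worked example of the transform above, assuming a hypothetical 2x2 binned camera mode with analogCrop = (408, 304)/3280x2464 and outputSize = 1640x1232:

/*
 *   ispCrop                         = (0, 0)/820x616
 *   scaledBy(3280x2464, 1640x1232)  = (0, 0)/1640x1232   (doubled in each dimension)
 *   translateBy((408, 304))         = (408, 304)/1640x1232
 *
 * i.e. a quarter-of-the-mode crop maps back onto the corresponding region
 * of the full sensor pixel array.
 */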
+
+void CameraData::applyScalerCrop(const ControlList &controls)
+{
+ const auto &scalerCrop = controls.get<Rectangle>(controls::ScalerCrop);
+ if (scalerCrop) {
+ Rectangle nativeCrop = *scalerCrop;
+
+ if (!nativeCrop.width || !nativeCrop.height)
+ nativeCrop = { 0, 0, 1, 1 };
+
+ /* Create a version of the crop scaled to ISP (camera mode) pixels. */
+ Rectangle ispCrop = nativeCrop.translatedBy(-sensorInfo_.analogCrop.topLeft());
+ ispCrop.scaleBy(sensorInfo_.outputSize, sensorInfo_.analogCrop.size());
+
+ /*
+ * The crop that we set must be:
+ * 1. At least as big as ispMinCropSize_, once that's been
+ * enlarged to the same aspect ratio.
+ * 2. With the same mid-point, if possible.
+ * 3. But it can't go outside the sensor area.
+ */
+ Size minSize = ispMinCropSize_.expandedToAspectRatio(nativeCrop.size());
+ Size size = ispCrop.size().expandedTo(minSize);
+ ispCrop = size.centeredTo(ispCrop.center()).enclosedIn(Rectangle(sensorInfo_.outputSize));
+
+ if (ispCrop != ispCrop_) {
+ ispCrop_ = ispCrop;
+ platformSetIspCrop();
+
+ /*
+ * Also update the ScalerCrop in the metadata with what we actually
+ * used. But we must first rescale that from ISP (camera mode) pixels
+ * back into sensor native pixels.
+ */
+ scalerCrop_ = scaleIspCrop(ispCrop_);
+ }
+ }
+}
+
+void CameraData::cameraTimeout()
+{
+ LOG(RPI, Error) << "Camera frontend has timed out!";
+ LOG(RPI, Error) << "Please check that your camera sensor connector is attached securely.";
+ LOG(RPI, Error) << "Alternatively, try another cable and/or sensor.";
+
+ state_ = CameraData::State::Error;
+ platformStop();
+
+ /*
+ * To allow the application to attempt a recovery from this timeout,
+ * stop all devices streaming, and return any outstanding requests as
+ * incomplete and cancelled.
+ */
+ for (auto const stream : streams_)
+ stream->dev()->streamOff();
+
+ clearIncompleteRequests();
+}
+
+void CameraData::frameStarted(uint32_t sequence)
+{
+ LOG(RPI, Debug) << "Frame start " << sequence;
+
+ /* Write any controls for the next frame as soon as we can. */
+ delayedCtrls_->applyControls(sequence);
+}
+
+void CameraData::clearIncompleteRequests()
+{
+ /*
+ * All outstanding requests (and associated buffers) must be returned
+ * back to the application.
+ */
+ while (!requestQueue_.empty()) {
+ Request *request = requestQueue_.front();
+
+ for (auto &b : request->buffers()) {
+ FrameBuffer *buffer = b.second;
+ /*
+ * Has the buffer already been handed back to the
+ * request? If not, do so now.
+ */
+ if (buffer->request()) {
+ buffer->_d()->cancel();
+ pipe()->completeBuffer(request, buffer);
+ }
+ }
+
+ pipe()->completeRequest(request);
+ requestQueue_.pop();
+ }
+}
+
+void CameraData::handleStreamBuffer(FrameBuffer *buffer, RPi::Stream *stream)
+{
+ /*
+ * It is possible to be here without a pending request, so check
+	 * that we actually have one to action; otherwise we simply return the
+	 * buffer back to the stream.
+ */
+ Request *request = requestQueue_.empty() ? nullptr : requestQueue_.front();
+ if (!dropFrameCount_ && request && request->findBuffer(stream) == buffer) {
+ /*
+ * Tag the buffer as completed, returning it to the
+ * application.
+ */
+ LOG(RPI, Debug) << "Completing request buffer for stream "
+ << stream->name();
+ pipe()->completeBuffer(request, buffer);
+ } else {
+ /*
+ * This buffer was not part of the Request (which happens if an
+ * internal buffer was used for an external stream, or
+ * unconditionally for internal streams), or there is no pending
+ * request, so we can recycle it.
+ */
+ LOG(RPI, Debug) << "Returning buffer to stream "
+ << stream->name();
+ stream->returnBuffer(buffer);
+ }
+}
+
+void CameraData::handleState()
+{
+ switch (state_) {
+ case State::Stopped:
+ case State::Busy:
+ case State::Error:
+ break;
+
+ case State::IpaComplete:
+ /* If the request is completed, we will switch to Idle state. */
+ checkRequestCompleted();
+ /*
+ * No break here, we want to try running the pipeline again.
+ * The fallthrough clause below suppresses compiler warnings.
+ */
+ [[fallthrough]];
+
+ case State::Idle:
+ tryRunPipeline();
+ break;
+ }
+}
+
+void CameraData::checkRequestCompleted()
+{
+ bool requestCompleted = false;
+ /*
+ * If we are dropping this frame, do not touch the request, simply
+ * change the state to IDLE when ready.
+ */
+ if (!dropFrameCount_) {
+ Request *request = requestQueue_.front();
+ if (request->hasPendingBuffers())
+ return;
+
+ /* Must wait for metadata to be filled in before completing. */
+ if (state_ != State::IpaComplete)
+ return;
+
+ LOG(RPI, Debug) << "Completing request sequence: "
+ << request->sequence();
+
+ pipe()->completeRequest(request);
+ requestQueue_.pop();
+ requestCompleted = true;
+ }
+
+ /*
+	 * Make sure all the expected ISP outputs have completed in the case of
+	 * a dropped frame.
+ */
+ if (state_ == State::IpaComplete &&
+ ((ispOutputCount_ == ispOutputTotal_ && dropFrameCount_) ||
+ requestCompleted)) {
+ LOG(RPI, Debug) << "Going into Idle state";
+ state_ = State::Idle;
+ if (dropFrameCount_) {
+ dropFrameCount_--;
+ LOG(RPI, Debug) << "Dropping frame at the request of the IPA ("
+ << dropFrameCount_ << " left)";
+ }
+ }
+}
+
+void CameraData::fillRequestMetadata(const ControlList &bufferControls, Request *request)
+{
+ request->metadata().set(controls::SensorTimestamp,
+ bufferControls.get(controls::SensorTimestamp).value_or(0));
+
+ request->metadata().set(controls::ScalerCrop, scalerCrop_);
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rpi/common/pipeline_base.h b/src/libcamera/pipeline/rpi/common/pipeline_base.h
new file mode 100644
index 00000000..f9cecf70
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/common/pipeline_base.h
@@ -0,0 +1,286 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019-2023, Raspberry Pi Ltd
+ *
+ * Pipeline handler base class for Raspberry Pi devices
+ */
+
+#include <map>
+#include <memory>
+#include <optional>
+#include <queue>
+#include <string>
+#include <unordered_set>
+#include <utility>
+#include <vector>
+
+#include <libcamera/controls.h>
+#include <libcamera/request.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/framebuffer.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/media_object.h"
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+#include "libcamera/internal/yaml_parser.h"
+
+#include <libcamera/ipa/raspberrypi_ipa_interface.h>
+#include <libcamera/ipa/raspberrypi_ipa_proxy.h>
+
+#include "delayed_controls.h"
+#include "rpi_stream.h"
+
+using namespace std::chrono_literals;
+
+namespace libcamera {
+
+namespace RPi {
+
+/* Map of mbus codes to supported sizes reported by the sensor. */
+using SensorFormats = std::map<unsigned int, std::vector<Size>>;
+
+class RPiCameraConfiguration;
+class CameraData : public Camera::Private
+{
+public:
+ CameraData(PipelineHandler *pipe)
+ : Camera::Private(pipe), state_(State::Stopped),
+ dropFrameCount_(0), buffersAllocated_(false),
+ ispOutputCount_(0), ispOutputTotal_(0)
+ {
+ }
+
+ virtual ~CameraData()
+ {
+ }
+
+ virtual CameraConfiguration::Status platformValidate(RPiCameraConfiguration *rpiConfig) const = 0;
+ virtual int platformConfigure(const RPiCameraConfiguration *rpiConfig) = 0;
+ virtual void platformStart() = 0;
+ virtual void platformStop() = 0;
+
+ double scoreFormat(double desired, double actual) const;
+ V4L2SubdeviceFormat findBestFormat(const Size &req, unsigned int bitDepth) const;
+
+ void freeBuffers();
+ virtual void platformFreeBuffers() = 0;
+
+ void enumerateVideoDevices(MediaLink *link, const std::string &frontend);
+
+ int loadPipelineConfiguration();
+ int loadIPA(ipa::RPi::InitResult *result);
+ int configureIPA(const CameraConfiguration *config, ipa::RPi::ConfigResult *result);
+ virtual int platformInitIpa(ipa::RPi::InitParams &params) = 0;
+ virtual int platformConfigureIpa(ipa::RPi::ConfigParams &params) = 0;
+
+ void metadataReady(const ControlList &metadata);
+ void setDelayedControls(const ControlList &controls, uint32_t delayContext);
+ void setLensControls(const ControlList &controls);
+ void setSensorControls(ControlList &controls);
+
+ Rectangle scaleIspCrop(const Rectangle &ispCrop) const;
+ void applyScalerCrop(const ControlList &controls);
+ virtual void platformSetIspCrop() = 0;
+
+ void cameraTimeout();
+ void frameStarted(uint32_t sequence);
+
+ void clearIncompleteRequests();
+ void handleStreamBuffer(FrameBuffer *buffer, Stream *stream);
+ void handleState();
+
+ virtual V4L2VideoDevice::Formats ispFormats() const = 0;
+ virtual V4L2VideoDevice::Formats rawFormats() const = 0;
+ virtual V4L2VideoDevice *frontendDevice() = 0;
+
+ virtual int platformPipelineConfigure(const std::unique_ptr<YamlObject> &root) = 0;
+
+ std::unique_ptr<ipa::RPi::IPAProxyRPi> ipa_;
+
+ std::unique_ptr<CameraSensor> sensor_;
+ SensorFormats sensorFormats_;
+
+ /* The vector below is just for convenience when iterating over all streams. */
+ std::vector<Stream *> streams_;
+ /* Stores the ids of the buffers mapped in the IPA. */
+ std::unordered_set<unsigned int> bufferIds_;
+ /*
+ * Stores a cascade of Video Mux or Bridge devices between the sensor and
+ * Unicam together with media link across the entities.
+ */
+ std::vector<std::pair<std::unique_ptr<V4L2Subdevice>, MediaLink *>> bridgeDevices_;
+
+ std::unique_ptr<DelayedControls> delayedCtrls_;
+ bool sensorMetadata_;
+
+ /*
+ * All the functions in this class are called from a single calling
+ * thread. So, we do not need to have any mutex to protect access to any
+ * of the variables below.
+ */
+ enum class State { Stopped, Idle, Busy, IpaComplete, Error };
+ State state_;
+
+ bool isRunning()
+ {
+ return state_ != State::Stopped && state_ != State::Error;
+ }
+
+ std::queue<Request *> requestQueue_;
+
+ /* For handling digital zoom. */
+ IPACameraSensorInfo sensorInfo_;
+ Rectangle ispCrop_; /* crop in ISP (camera mode) pixels */
+ Rectangle scalerCrop_; /* crop in sensor native pixels */
+ Size ispMinCropSize_;
+
+ unsigned int dropFrameCount_;
+
+ /*
+	 * If set, this stores the value that represents a gain of one for
+ * the V4L2_CID_NOTIFY_GAINS control.
+ */
+ std::optional<int32_t> notifyGainsUnity_;
+
+ /* Have internal buffers been allocated? */
+ bool buffersAllocated_;
+
+ struct Config {
+ /*
+ * Override any request from the IPA to drop a number of startup
+ * frames.
+ */
+ bool disableStartupFrameDrops;
+ /*
+ * Override the camera timeout value calculated by the IPA based
+ * on frame durations.
+ */
+ unsigned int cameraTimeoutValue;
+ };
+
+ Config config_;
+
+protected:
+ void fillRequestMetadata(const ControlList &bufferControls,
+ Request *request);
+
+ virtual void tryRunPipeline() = 0;
+
+ unsigned int ispOutputCount_;
+ unsigned int ispOutputTotal_;
+
+private:
+ void checkRequestCompleted();
+};
+
+class PipelineHandlerBase : public PipelineHandler
+{
+public:
+ PipelineHandlerBase(CameraManager *manager)
+ : PipelineHandler(manager)
+ {
+ }
+
+ virtual ~PipelineHandlerBase()
+ {
+ }
+
+ static bool isRgb(const PixelFormat &pixFmt);
+ static bool isYuv(const PixelFormat &pixFmt);
+ static bool isRaw(const PixelFormat &pixFmt);
+
+ static bool updateStreamConfig(StreamConfiguration *stream,
+ const V4L2DeviceFormat &format);
+ static V4L2DeviceFormat toV4L2DeviceFormat(const V4L2VideoDevice *dev,
+ const StreamConfiguration *stream);
+ static V4L2DeviceFormat toV4L2DeviceFormat(const V4L2VideoDevice *dev,
+ const V4L2SubdeviceFormat &format,
+ BayerFormat::Packing packingReq);
+
+ std::unique_ptr<CameraConfiguration>
+ generateConfiguration(Camera *camera, Span<const StreamRole> roles) override;
+ int configure(Camera *camera, CameraConfiguration *config) override;
+
+ int exportFrameBuffers(Camera *camera, libcamera::Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
+
+ int start(Camera *camera, const ControlList *controls) override;
+ void stopDevice(Camera *camera) override;
+ void releaseDevice(Camera *camera) override;
+
+ int queueRequestDevice(Camera *camera, Request *request) override;
+
+protected:
+ int registerCamera(std::unique_ptr<RPi::CameraData> &cameraData,
+			   MediaDevice *frontend, const std::string &frontendName,
+ MediaDevice *backend, MediaEntity *sensorEntity);
+
+ void mapBuffers(Camera *camera, const BufferMap &buffers, unsigned int mask);
+
+ virtual int platformRegister(std::unique_ptr<CameraData> &cameraData,
+ MediaDevice *unicam, MediaDevice *isp) = 0;
+
+private:
+ CameraData *cameraData(Camera *camera)
+ {
+ return static_cast<CameraData *>(camera->_d());
+ }
+
+ int queueAllBuffers(Camera *camera);
+ virtual int prepareBuffers(Camera *camera) = 0;
+};
+
+class RPiCameraConfiguration final : public CameraConfiguration
+{
+public:
+ RPiCameraConfiguration(const CameraData *data)
+ : CameraConfiguration(), data_(data)
+ {
+ }
+
+ CameraConfiguration::Status validateColorSpaces(ColorSpaceFlags flags);
+ Status validate() override;
+
+ /* Cache the combinedTransform_ that will be applied to the sensor */
+ Transform combinedTransform_;
+ /* The sensor format computed in validate() */
+ V4L2SubdeviceFormat sensorFormat_;
+
+ struct StreamParams {
+ StreamParams()
+ : index(0), cfg(nullptr), dev(nullptr)
+ {
+ }
+
+ StreamParams(unsigned int index_, StreamConfiguration *cfg_)
+ : index(index_), cfg(cfg_), dev(nullptr)
+ {
+ }
+
+ unsigned int index;
+ StreamConfiguration *cfg;
+ V4L2VideoDevice *dev;
+ V4L2DeviceFormat format;
+ };
+
+ std::vector<StreamParams> rawStreams_;
+ std::vector<StreamParams> outStreams_;
+
+ /*
+ * Store the colour spaces that all our streams will have. RGB format streams
+ * will have the same colorspace as YUV streams, with YCbCr field cleared and
+ * range set to full.
+ */
+ std::optional<ColorSpace> yuvColorSpace_;
+ std::optional<ColorSpace> rgbColorSpace_;
+
+private:
+ const CameraData *data_;
+};
+
+} /* namespace RPi */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/raspberrypi/rpi_stream.cpp b/src/libcamera/pipeline/rpi/common/rpi_stream.cpp
index 7a93efaa..accf59eb 100644
--- a/src/libcamera/pipeline/raspberrypi/rpi_stream.cpp
+++ b/src/libcamera/pipeline/rpi/common/rpi_stream.cpp
@@ -1,14 +1,19 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Ltd.
+ * Copyright (C) 2020, Raspberry Pi Ltd
*
- * rpi_stream.cpp - Raspberry Pi device stream abstraction class.
+ * Raspberry Pi device stream abstraction class.
*/
#include "rpi_stream.h"
+#include <algorithm>
+#include <tuple>
+#include <utility>
+
#include <libcamera/base/log.h>
-#include <libcamera/ipa/raspberrypi_ipa_interface.h>
+/* Maximum number of buffer slots to allocate in the V4L2 device driver. */
+static constexpr unsigned int maxV4L2BufferCount = 32;
namespace libcamera {
@@ -16,40 +21,64 @@ LOG_DEFINE_CATEGORY(RPISTREAM)
namespace RPi {
+const BufferObject Stream::errorBufferObject{ nullptr, false };
+
+void Stream::setFlags(StreamFlags flags)
+{
+ /* We don't want dynamic mmapping. */
+ ASSERT(!(flags & StreamFlag::RequiresMmap));
+
+ flags_ |= flags;
+
+ /* Import streams cannot be external. */
+ ASSERT(!(flags_ & StreamFlag::External) || !(flags_ & StreamFlag::ImportOnly));
+}
+
+void Stream::clearFlags(StreamFlags flags)
+{
+ /* We don't want dynamic mmapping. */
+ ASSERT(!(flags & StreamFlag::RequiresMmap));
+
+ flags_ &= ~flags;
+}
+
+RPi::Stream::StreamFlags Stream::getFlags() const
+{
+ return flags_;
+}
+
V4L2VideoDevice *Stream::dev() const
{
return dev_.get();
}
-std::string Stream::name() const
+const std::string &Stream::name() const
{
return name_;
}
-void Stream::resetBuffers()
+unsigned int Stream::swDownscale() const
{
- /* Add all internal buffers to the queue of usable buffers. */
- availableBuffers_ = {};
- for (auto const &buffer : internalBuffers_)
- availableBuffers_.push(buffer.get());
+ return swDownscale_;
}
-void Stream::setExternal(bool external)
+void Stream::setSwDownscale(unsigned int swDownscale)
{
- /* Import streams cannot be external. */
- ASSERT(!external || !importOnly_);
- external_ = external;
+ swDownscale_ = swDownscale;
}
-bool Stream::isExternal() const
+void Stream::resetBuffers()
{
- return external_;
+ /* Add all internal buffers to the queue of usable buffers. */
+ availableBuffers_ = {};
+ for (auto const &buffer : internalBuffers_)
+ availableBuffers_.push(buffer.get());
}
void Stream::setExportedBuffers(std::vector<std::unique_ptr<FrameBuffer>> *buffers)
{
for (auto const &buffer : *buffers)
- bufferMap_.emplace(id_.get(), buffer.get());
+ bufferEmplace(++id_, buffer.get());
}
const BufferMap &Stream::getBuffers() const
@@ -57,68 +86,42 @@ const BufferMap &Stream::getBuffers() const
return bufferMap_;
}
-int Stream::getBufferId(FrameBuffer *buffer) const
+unsigned int Stream::getBufferId(FrameBuffer *buffer) const
{
- if (importOnly_)
- return -1;
+ if (flags_ & StreamFlag::ImportOnly)
+ return 0;
/* Find the buffer in the map, and return the buffer id. */
auto it = std::find_if(bufferMap_.begin(), bufferMap_.end(),
- [&buffer](auto const &p) { return p.second == buffer; });
+ [&buffer](auto const &p) { return p.second.buffer == buffer; });
if (it == bufferMap_.end())
- return -1;
+ return 0;
return it->first;
}
-void Stream::setExternalBuffer(FrameBuffer *buffer)
+void Stream::setExportedBuffer(FrameBuffer *buffer)
{
- bufferMap_.emplace(ipa::RPi::MaskExternalBuffer | id_.get(), buffer);
-}
-
-void Stream::removeExternalBuffer(FrameBuffer *buffer)
-{
- int id = getBufferId(buffer);
-
- /* Ensure we have this buffer in the stream, and it is marked external. */
- ASSERT(id != -1 && (id & ipa::RPi::MaskExternalBuffer));
- bufferMap_.erase(id);
+ bufferEmplace(++id_, buffer);
}
int Stream::prepareBuffers(unsigned int count)
{
int ret;
- if (!importOnly_) {
- if (count) {
- /* Export some frame buffers for internal use. */
- ret = dev_->exportBuffers(count, &internalBuffers_);
- if (ret < 0)
- return ret;
-
- /* Add these exported buffers to the internal/external buffer list. */
- setExportedBuffers(&internalBuffers_);
- resetBuffers();
- }
+ if (!(flags_ & StreamFlag::ImportOnly)) {
+ /* Export some frame buffers for internal use. */
+ ret = dev_->exportBuffers(count, &internalBuffers_);
+ if (ret < 0)
+ return ret;
- /* We must import all internal/external exported buffers. */
- count = bufferMap_.size();
+ /* Add these exported buffers to the internal/external buffer list. */
+ setExportedBuffers(&internalBuffers_);
+ resetBuffers();
}
- /*
- * If this is an external stream, we must allocate slots for buffers that
- * might be externally allocated. We have no indication of how many buffers
- * may be used, so this might overallocate slots in the buffer cache.
- * Similarly, if this stream is only importing buffers, we do the same.
- *
- * \todo Find a better heuristic, or, even better, an exact solution to
- * this issue.
- */
- if (isExternal() || importOnly_)
- count = count * 2;
-
- return dev_->importBuffers(count);
+ return dev_->importBuffers(maxV4L2BufferCount);
}
int Stream::queueBuffer(FrameBuffer *buffer)
@@ -162,7 +165,7 @@ int Stream::queueBuffer(FrameBuffer *buffer)
void Stream::returnBuffer(FrameBuffer *buffer)
{
- if (!external_) {
+ if (!(flags_ & StreamFlag::External) && !(flags_ & StreamFlag::Recurrent)) {
/* For internal buffers, simply requeue back to the device. */
queueToDevice(buffer);
return;
@@ -171,9 +174,6 @@ void Stream::returnBuffer(FrameBuffer *buffer)
/* Push this buffer back into the queue to be used again. */
availableBuffers_.push(buffer);
- /* Allow the buffer id to be reused. */
- id_.release(getBufferId(buffer));
-
/*
* Do we have any Request buffers that are waiting to be queued?
* If so, do it now as availableBuffers_ will not be empty.
@@ -202,11 +202,32 @@ void Stream::returnBuffer(FrameBuffer *buffer)
}
}
+const BufferObject &Stream::getBuffer(unsigned int id)
+{
+ auto const &it = bufferMap_.find(id);
+ if (it == bufferMap_.end())
+ return errorBufferObject;
+
+ return it->second;
+}
+
+const BufferObject &Stream::acquireBuffer()
+{
+ /* No id provided, so pick up the next available buffer if possible. */
+ if (availableBuffers_.empty())
+ return errorBufferObject;
+
+ unsigned int id = getBufferId(availableBuffers_.front());
+ availableBuffers_.pop();
+
+ return getBuffer(id);
+}
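A minimal sketch, not part of the patch, of how a platform pipeline handler might consume getBuffer()/acquireBuffer(); the process() helper and the surrounding control flow are illustrative assumptions only:

/*
 *   const RPi::BufferObject &obj = stream.acquireBuffer();
 *   if (!obj.buffer)
 *           return;                            // errorBufferObject: nothing available
 *
 *   if (obj.mapped)                            // only set for RequiresMmap streams
 *           process(obj.mapped->planes()[0]);  // CPU access to the first plane
 *   else
 *           stream.queueBuffer(obj.buffer);    // hand straight to the V4L2 device
 */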
+
int Stream::queueAllBuffers()
{
int ret;
- if (external_)
+ if ((flags_ & StreamFlag::External) || (flags_ & StreamFlag::Recurrent))
return 0;
while (!availableBuffers_.empty()) {
@@ -226,13 +247,23 @@ void Stream::releaseBuffers()
clearBuffers();
}
+void Stream::bufferEmplace(unsigned int id, FrameBuffer *buffer)
+{
+ if (flags_ & StreamFlag::RequiresMmap)
+ bufferMap_.emplace(std::piecewise_construct, std::forward_as_tuple(id),
+ std::forward_as_tuple(buffer, true));
+ else
+ bufferMap_.emplace(std::piecewise_construct, std::forward_as_tuple(id),
+ std::forward_as_tuple(buffer, false));
+}
+
void Stream::clearBuffers()
{
availableBuffers_ = std::queue<FrameBuffer *>{};
requestBuffers_ = std::queue<FrameBuffer *>{};
internalBuffers_.clear();
bufferMap_.clear();
- id_.reset();
+ id_ = 0;
}
int Stream::queueToDevice(FrameBuffer *buffer)
diff --git a/src/libcamera/pipeline/rpi/common/rpi_stream.h b/src/libcamera/pipeline/rpi/common/rpi_stream.h
new file mode 100644
index 00000000..a13d5dc0
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/common/rpi_stream.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * Raspberry Pi device stream abstraction class.
+ */
+
+#pragma once
+
+#include <optional>
+#include <queue>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include <libcamera/base/flags.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/mapped_framebuffer.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+
+namespace libcamera {
+
+namespace RPi {
+
+enum BufferMask {
+ MaskID = 0x00ffff,
+ MaskStats = 0x010000,
+ MaskEmbeddedData = 0x020000,
+ MaskBayerData = 0x040000,
+};
+
+struct BufferObject {
+ BufferObject(FrameBuffer *b, bool requiresMmap)
+ : buffer(b), mapped(std::nullopt)
+ {
+ if (requiresMmap)
+ mapped = std::make_optional<MappedFrameBuffer>
+ (b, MappedFrameBuffer::MapFlag::ReadWrite);
+ }
+
+ FrameBuffer *buffer;
+ std::optional<MappedFrameBuffer> mapped;
+};
+
+using BufferMap = std::unordered_map<unsigned int, BufferObject>;
+
+/*
+ * Device stream abstraction for either an internal or external stream.
+ * Used for both Unicam and the ISP.
+ */
+class Stream : public libcamera::Stream
+{
+public:
+ enum class StreamFlag {
+ None = 0,
+ /*
+ * Indicates that this stream only imports buffers, e.g. the ISP
+ * input stream.
+ */
+ ImportOnly = (1 << 0),
+ /*
+ * Indicates that this stream is active externally, i.e. the
+ * buffers might be provided by (and returned to) the application.
+ */
+ External = (1 << 1),
+ /*
+		 * Indicates that the stream buffers need to be mmapped and returned
+ * to the pipeline handler when requested.
+ */
+ RequiresMmap = (1 << 2),
+ /*
+ * Indicates a stream that needs buffers recycled every frame internally
+ * in the pipeline handler, e.g. stitch, TDN, config. All buffer
+ * management will be handled by the pipeline handler.
+ */
+ Recurrent = (1 << 3),
+ /*
+ * Indicates that the output stream needs a software format conversion
+ * to be applied after ISP processing.
+ */
+ Needs32bitConv = (1 << 4),
+ };
+
+ using StreamFlags = Flags<StreamFlag>;
+
+ Stream()
+ : flags_(StreamFlag::None), id_(0), swDownscale_(0)
+ {
+ }
+
+ Stream(const char *name, MediaEntity *dev, StreamFlags flags = StreamFlag::None)
+ : flags_(flags), name_(name),
+ dev_(std::make_unique<V4L2VideoDevice>(dev)), id_(0),
+ swDownscale_(0)
+ {
+ }
+
+ void setFlags(StreamFlags flags);
+ void clearFlags(StreamFlags flags);
+ StreamFlags getFlags() const;
+
+ V4L2VideoDevice *dev() const;
+ const std::string &name() const;
+ void resetBuffers();
+
+ unsigned int swDownscale() const;
+ void setSwDownscale(unsigned int swDownscale);
+
+ void setExportedBuffers(std::vector<std::unique_ptr<FrameBuffer>> *buffers);
+ const BufferMap &getBuffers() const;
+ unsigned int getBufferId(FrameBuffer *buffer) const;
+
+ void setExportedBuffer(FrameBuffer *buffer);
+
+ int prepareBuffers(unsigned int count);
+ int queueBuffer(FrameBuffer *buffer);
+ void returnBuffer(FrameBuffer *buffer);
+
+ const BufferObject &getBuffer(unsigned int id);
+ const BufferObject &acquireBuffer();
+
+ int queueAllBuffers();
+ void releaseBuffers();
+
+ /* For error handling. */
+ static const BufferObject errorBufferObject;
+
+private:
+ void bufferEmplace(unsigned int id, FrameBuffer *buffer);
+ void clearBuffers();
+ int queueToDevice(FrameBuffer *buffer);
+
+ StreamFlags flags_;
+
+ /* Stream name identifier. */
+ std::string name_;
+
+ /* The actual device stream. */
+ std::unique_ptr<V4L2VideoDevice> dev_;
+
+ /* Tracks a unique id key for the bufferMap_ */
+ unsigned int id_;
+
+ /* Power of 2 greater than one if software downscaling will be required. */
+ unsigned int swDownscale_;
+
+ /* All frame buffers associated with this device stream. */
+ BufferMap bufferMap_;
+
+ /*
+ * List of frame buffers that we can use if none have been provided by
+ * the application for external streams. This is populated by the
+ * buffers exported internally.
+ */
+ std::queue<FrameBuffer *> availableBuffers_;
+
+ /*
+ * List of frame buffers that are to be queued into the device from a Request.
+ * A nullptr indicates any internal buffer can be used (from availableBuffers_),
+ * whereas a valid pointer indicates an external buffer to be queued.
+ *
+ * Ordering buffers to be queued is important here as it must match the
+ * requests coming from the application.
+ */
+ std::queue<FrameBuffer *> requestBuffers_;
+
+ /*
+ * This is a list of buffers exported internally. Need to keep this around
+ * as the stream needs to maintain ownership of these buffers.
+ */
+ std::vector<std::unique_ptr<FrameBuffer>> internalBuffers_;
+};
+
+/*
+ * The following class is just a convenient (and typesafe) array of device
+ * streams indexed with an enum class.
+ */
+template<typename E, std::size_t N>
+class Device : public std::array<class Stream, N>
+{
+public:
+ Stream &operator[](E e)
+ {
+ return std::array<class Stream, N>::operator[](utils::to_underlying(e));
+ }
+ const Stream &operator[](E e) const
+ {
+ return std::array<class Stream, N>::operator[](utils::to_underlying(e));
+ }
+};
+
+} /* namespace RPi */
+
+LIBCAMERA_FLAGS_ENABLE_OPERATORS(RPi::Stream::StreamFlag)
+
+} /* namespace libcamera */
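The Device<> helper above simply forwards enum indices to std::array. A self-contained sketch of the same idea, with std::string standing in for RPi::Stream and static_cast standing in for utils::to_underlying():

#include <array>
#include <cstddef>
#include <string>
#include <type_traits>

enum class Isp : unsigned int { Input, Output0, Output1, Stats };

template<typename E, std::size_t N>
class Device : public std::array<std::string, N>
{
public:
	std::string &operator[](E e)
	{
		return std::array<std::string, N>::operator[](
			static_cast<std::underlying_type_t<E>>(e));
	}
};

int main()
{
	Device<Isp, 4> isp;
	isp[Isp::Stats] = "ISP Stats"; /* indexing only compiles with the enum type */
	return isp[Isp::Stats] == "ISP Stats" ? 0 : 1;
}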
diff --git a/src/libcamera/pipeline/rpi/meson.build b/src/libcamera/pipeline/rpi/meson.build
new file mode 100644
index 00000000..2391b6a9
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/meson.build
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: CC0-1.0
+
+subdir('common')
+
+foreach pipeline : pipelines
+ pipeline = pipeline.split('/')
+ if pipeline.length() < 2 or pipeline[0] != 'rpi'
+ continue
+ endif
+
+ subdir(pipeline[1])
+endforeach
diff --git a/src/libcamera/pipeline/rpi/vc4/data/example.yaml b/src/libcamera/pipeline/rpi/vc4/data/example.yaml
new file mode 100644
index 00000000..b8e01ade
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/vc4/data/example.yaml
@@ -0,0 +1,46 @@
+{
+ "version": 1.0,
+ "target": "bcm2835",
+
+ "pipeline_handler":
+ {
+ # The minimum number of internal buffers to be allocated for
+ # Unicam. This value must be greater than 0, but less than or
+ # equal to min_total_unicam_buffers.
+ #
+ # A larger number of internal buffers can reduce the occurrence
+ # of frame drops during high CPU loads, but might also cause
+ # additional latency in the system.
+ #
+ # Note that the pipeline handler might override this value and
+ # not allocate any internal buffers if it knows they will never
+ # be used. For example if the RAW stream is marked as mandatory
+ # and there are no dropped frames signalled for algorithm
+ # convergence.
+ #
+ # "min_unicam_buffers": 2,
+
+ # The minimum total (internal + external) buffer count used for
+ # Unicam. The number of internal buffers allocated for Unicam is
+ # given by:
+ #
+ # internal buffer count = max(min_unicam_buffers,
+ # min_total_unicam_buffers - external buffer count)
+ #
+ # "min_total_unicam_buffers": 4,
+
+ # Override any request from the IPA to drop a number of startup
+ # frames.
+ #
+ # "disable_startup_frame_drops": false,
+
+ # Custom timeout value (in ms) for camera to use. This overrides
+ # the value computed by the pipeline handler based on frame
+ # durations.
+ #
+ # Set this value to 0 to use the pipeline handler computed
+ # timeout value.
+ #
+ # "camera_timeout_value_ms": 0,
+ }
+}
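As a worked illustration of the formula above, with the default values min_unicam_buffers = 2 and min_total_unicam_buffers = 4 (the external buffer counts are assumed numbers):

    internal = max(min_unicam_buffers, min_total_unicam_buffers - external)

    no RAW stream configured (external = 0):   max(2, 4 - 0) = 4 internal buffers
    RAW stream with 6 request buffers:         max(2, 4 - 6) = 2 internal buffers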
diff --git a/src/libcamera/pipeline/rpi/vc4/data/meson.build b/src/libcamera/pipeline/rpi/vc4/data/meson.build
new file mode 100644
index 00000000..179feebc
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/vc4/data/meson.build
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: CC0-1.0
+
+conf_files = files([
+ 'example.yaml',
+])
+
+install_data(conf_files,
+ install_dir : pipeline_data_dir / 'rpi' / 'vc4',
+ install_tag : 'runtime')
diff --git a/src/libcamera/pipeline/rpi/vc4/meson.build b/src/libcamera/pipeline/rpi/vc4/meson.build
new file mode 100644
index 00000000..386e2296
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/vc4/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_sources += files([
+ 'vc4.cpp',
+])
+
+subdir('data')
diff --git a/src/libcamera/pipeline/rpi/vc4/vc4.cpp b/src/libcamera/pipeline/rpi/vc4/vc4.cpp
new file mode 100644
index 00000000..37fb310f
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/vc4/vc4.cpp
@@ -0,0 +1,1023 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019-2023, Raspberry Pi Ltd
+ *
+ * Pipeline handler for VC4-based Raspberry Pi devices
+ */
+
+#include <linux/bcm2835-isp.h>
+#include <linux/v4l2-controls.h>
+#include <linux/videodev2.h>
+
+#include <libcamera/formats.h>
+
+#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/dma_heaps.h"
+
+#include "../common/pipeline_base.h"
+#include "../common/rpi_stream.h"
+
+using namespace std::chrono_literals;
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(RPI)
+
+using StreamFlag = RPi::Stream::StreamFlag;
+using StreamParams = RPi::RPiCameraConfiguration::StreamParams;
+
+namespace {
+
+enum class Unicam : unsigned int { Image, Embedded };
+enum class Isp : unsigned int { Input, Output0, Output1, Stats };
+
+} /* namespace */
+
+class Vc4CameraData final : public RPi::CameraData
+{
+public:
+ Vc4CameraData(PipelineHandler *pipe)
+ : RPi::CameraData(pipe)
+ {
+ }
+
+ ~Vc4CameraData()
+ {
+ freeBuffers();
+ }
+
+ V4L2VideoDevice::Formats ispFormats() const override
+ {
+ return isp_[Isp::Output0].dev()->formats();
+ }
+
+ V4L2VideoDevice::Formats rawFormats() const override
+ {
+ return unicam_[Unicam::Image].dev()->formats();
+ }
+
+ V4L2VideoDevice *frontendDevice() override
+ {
+ return unicam_[Unicam::Image].dev();
+ }
+
+ void platformFreeBuffers() override
+ {
+ }
+
+ CameraConfiguration::Status platformValidate(RPi::RPiCameraConfiguration *rpiConfig) const override;
+
+ int platformPipelineConfigure(const std::unique_ptr<YamlObject> &root) override;
+
+ void platformStart() override;
+ void platformStop() override;
+
+ void unicamBufferDequeue(FrameBuffer *buffer);
+ void ispInputDequeue(FrameBuffer *buffer);
+ void ispOutputDequeue(FrameBuffer *buffer);
+
+ void processStatsComplete(const ipa::RPi::BufferIds &buffers);
+ void prepareIspComplete(const ipa::RPi::BufferIds &buffers, bool stitchSwapBuffers);
+ void setIspControls(const ControlList &controls);
+ void setCameraTimeout(uint32_t maxFrameLengthMs);
+
+ /* Array of Unicam and ISP device streams and associated buffers/streams. */
+ RPi::Device<Unicam, 2> unicam_;
+ RPi::Device<Isp, 4> isp_;
+
+ /* DMAHEAP allocation helper. */
+ DmaHeap dmaHeap_;
+ SharedFD lsTable_;
+
+ struct Config {
+ /*
+ * The minimum number of internal buffers to be allocated for
+ * the Unicam Image stream.
+ */
+ unsigned int minUnicamBuffers;
+ /*
+ * The minimum total (internal + external) buffer count used for
+ * the Unicam Image stream.
+ *
+ * Note that:
+ * minTotalUnicamBuffers must be >= 1, and
+ * minTotalUnicamBuffers >= minUnicamBuffers
+ */
+ unsigned int minTotalUnicamBuffers;
+ };
+
+ Config config_;
+
+private:
+ void platformSetIspCrop() override
+ {
+ isp_[Isp::Input].dev()->setSelection(V4L2_SEL_TGT_CROP, &ispCrop_);
+ }
+
+ int platformConfigure(const RPi::RPiCameraConfiguration *rpiConfig) override;
+ int platformConfigureIpa(ipa::RPi::ConfigParams &params) override;
+
+ int platformInitIpa([[maybe_unused]] ipa::RPi::InitParams &params) override
+ {
+ return 0;
+ }
+
+ struct BayerFrame {
+ FrameBuffer *buffer;
+ ControlList controls;
+ unsigned int delayContext;
+ };
+
+ void tryRunPipeline() override;
+ bool findMatchingBuffers(BayerFrame &bayerFrame, FrameBuffer *&embeddedBuffer);
+
+ std::queue<BayerFrame> bayerQueue_;
+ std::queue<FrameBuffer *> embeddedQueue_;
+};
+
+class PipelineHandlerVc4 : public RPi::PipelineHandlerBase
+{
+public:
+ PipelineHandlerVc4(CameraManager *manager)
+ : RPi::PipelineHandlerBase(manager)
+ {
+ }
+
+ ~PipelineHandlerVc4()
+ {
+ }
+
+ bool match(DeviceEnumerator *enumerator) override;
+
+private:
+ Vc4CameraData *cameraData(Camera *camera)
+ {
+ return static_cast<Vc4CameraData *>(camera->_d());
+ }
+
+ int prepareBuffers(Camera *camera) override;
+ int platformRegister(std::unique_ptr<RPi::CameraData> &cameraData,
+ MediaDevice *unicam, MediaDevice *isp) override;
+};
+
+bool PipelineHandlerVc4::match(DeviceEnumerator *enumerator)
+{
+ constexpr unsigned int numUnicamDevices = 2;
+
+ /*
+ * Loop over all Unicam instances, but return out once a match is found.
+	 * This is to ensure we correctly enumerate the camera when an instance
+ * of Unicam has registered with media controller, but has not registered
+ * device nodes due to a sensor subdevice failure.
+ */
+ for (unsigned int i = 0; i < numUnicamDevices; i++) {
+ DeviceMatch unicam("unicam");
+ MediaDevice *unicamDevice = acquireMediaDevice(enumerator, unicam);
+
+ if (!unicamDevice) {
+ LOG(RPI, Debug) << "Unable to acquire a Unicam instance";
+ continue;
+ }
+
+ DeviceMatch isp("bcm2835-isp");
+ MediaDevice *ispDevice = acquireMediaDevice(enumerator, isp);
+
+ if (!ispDevice) {
+ LOG(RPI, Debug) << "Unable to acquire ISP instance";
+ continue;
+ }
+
+ /*
+ * The loop below is used to register multiple cameras behind one or more
+ * video mux devices that are attached to a particular Unicam instance.
+ * Obviously these cameras cannot be used simultaneously.
+ */
+ unsigned int numCameras = 0;
+ for (MediaEntity *entity : unicamDevice->entities()) {
+ if (entity->function() != MEDIA_ENT_F_CAM_SENSOR)
+ continue;
+
+ std::unique_ptr<RPi::CameraData> cameraData = std::make_unique<Vc4CameraData>(this);
+ int ret = RPi::PipelineHandlerBase::registerCamera(cameraData,
+ unicamDevice, "unicam-image",
+ ispDevice, entity);
+ if (ret)
+ LOG(RPI, Error) << "Failed to register camera "
+ << entity->name() << ": " << ret;
+ else
+ numCameras++;
+ }
+
+ if (numCameras)
+ return true;
+ }
+
+ return false;
+}
+
+int PipelineHandlerVc4::prepareBuffers(Camera *camera)
+{
+ Vc4CameraData *data = cameraData(camera);
+ unsigned int numRawBuffers = 0;
+ int ret;
+
+ for (Stream *s : camera->streams()) {
+ if (BayerFormat::fromPixelFormat(s->configuration().pixelFormat).isValid()) {
+ numRawBuffers = s->configuration().bufferCount;
+ break;
+ }
+ }
+
+ /* Decide how many internal buffers to allocate. */
+ for (auto const stream : data->streams_) {
+ unsigned int numBuffers;
+ /*
+ * For Unicam, allocate a minimum number of buffers for internal
+ * use as we want to avoid any frame drops.
+ */
+ const unsigned int minBuffers = data->config_.minTotalUnicamBuffers;
+ if (stream == &data->unicam_[Unicam::Image]) {
+ /*
+ * If an application has configured a RAW stream, allocate
+ * additional buffers to make up the minimum, but ensure
+ * we have at least minUnicamBuffers of internal buffers
+ * to use to minimise frame drops.
+ */
+ numBuffers = std::max<int>(data->config_.minUnicamBuffers,
+ minBuffers - numRawBuffers);
+ } else if (stream == &data->isp_[Isp::Input]) {
+ /*
+ * ISP input buffers are imported from Unicam, so follow
+ * similar logic as above to count all the RAW buffers
+ * available.
+ */
+ numBuffers = numRawBuffers +
+ std::max<int>(data->config_.minUnicamBuffers,
+ minBuffers - numRawBuffers);
+
+ } else if (stream == &data->unicam_[Unicam::Embedded]) {
+ /*
+ * Embedded data buffers are (currently) for internal use, and
+ * are small enough (typically 1-2KB) that we can
+ * allocate them generously to avoid causing problems in the
+ * IPA when we cannot supply the metadata.
+ *
+ * 12 are allocated as a typical application will have 8-10
+ * input buffers, so allocating more embedded buffers than that
+ * is a sensible choice.
+ *
+ * The lifetimes of these buffers are smaller than those of the
+ * raw buffers, so allocating a fixed number will still suffice
+ * if the application requests a greater number of raw
+ * buffers, as these will be recycled quicker.
+ */
+ numBuffers = 12;
+ } else {
+ /*
+ * Since the ISP runs synchronous with the IPA and requests,
+ * we only ever need one set of internal buffers. Any buffers
+ * the application wants to hold onto will already be exported
+ * through PipelineHandlerRPi::exportFrameBuffers().
+ */
+ numBuffers = 1;
+ }
+
+ LOG(RPI, Debug) << "Preparing " << numBuffers
+ << " buffers for stream " << stream->name();
+
+ ret = stream->prepareBuffers(numBuffers);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+ * Pass the stats and embedded data buffers to the IPA. No other
+ * buffers need to be passed.
+ */
+ mapBuffers(camera, data->isp_[Isp::Stats].getBuffers(), RPi::MaskStats);
+ if (data->sensorMetadata_)
+ mapBuffers(camera, data->unicam_[Unicam::Embedded].getBuffers(),
+ RPi::MaskEmbeddedData);
+
+ return 0;
+}
+
+int PipelineHandlerVc4::platformRegister(std::unique_ptr<RPi::CameraData> &cameraData, MediaDevice *unicam, MediaDevice *isp)
+{
+ Vc4CameraData *data = static_cast<Vc4CameraData *>(cameraData.get());
+
+ if (!data->dmaHeap_.isValid())
+ return -ENOMEM;
+
+ MediaEntity *unicamImage = unicam->getEntityByName("unicam-image");
+ MediaEntity *ispOutput0 = isp->getEntityByName("bcm2835-isp0-output0");
+ MediaEntity *ispCapture1 = isp->getEntityByName("bcm2835-isp0-capture1");
+ MediaEntity *ispCapture2 = isp->getEntityByName("bcm2835-isp0-capture2");
+ MediaEntity *ispCapture3 = isp->getEntityByName("bcm2835-isp0-capture3");
+
+ if (!unicamImage || !ispOutput0 || !ispCapture1 || !ispCapture2 || !ispCapture3)
+ return -ENOENT;
+
+ /* Locate and open the unicam video streams. */
+ data->unicam_[Unicam::Image] = RPi::Stream("Unicam Image", unicamImage);
+
+ /* An embedded data node will not be present if the sensor does not support it. */
+ MediaEntity *unicamEmbedded = unicam->getEntityByName("unicam-embedded");
+ if (unicamEmbedded) {
+ data->unicam_[Unicam::Embedded] = RPi::Stream("Unicam Embedded", unicamEmbedded);
+ data->unicam_[Unicam::Embedded].dev()->bufferReady.connect(data,
+ &Vc4CameraData::unicamBufferDequeue);
+ }
+
+ /* Tag the ISP input stream as an import stream. */
+ data->isp_[Isp::Input] = RPi::Stream("ISP Input", ispOutput0, StreamFlag::ImportOnly);
+ data->isp_[Isp::Output0] = RPi::Stream("ISP Output0", ispCapture1);
+ data->isp_[Isp::Output1] = RPi::Stream("ISP Output1", ispCapture2);
+ data->isp_[Isp::Stats] = RPi::Stream("ISP Stats", ispCapture3);
+
+ /* Wire up all the buffer connections. */
+ data->unicam_[Unicam::Image].dev()->bufferReady.connect(data, &Vc4CameraData::unicamBufferDequeue);
+ data->isp_[Isp::Input].dev()->bufferReady.connect(data, &Vc4CameraData::ispInputDequeue);
+ data->isp_[Isp::Output0].dev()->bufferReady.connect(data, &Vc4CameraData::ispOutputDequeue);
+ data->isp_[Isp::Output1].dev()->bufferReady.connect(data, &Vc4CameraData::ispOutputDequeue);
+ data->isp_[Isp::Stats].dev()->bufferReady.connect(data, &Vc4CameraData::ispOutputDequeue);
+
+ if (data->sensorMetadata_ ^ !!data->unicam_[Unicam::Embedded].dev()) {
+ LOG(RPI, Warning) << "Mismatch between Unicam and CamHelper for embedded data usage!";
+ data->sensorMetadata_ = false;
+ if (data->unicam_[Unicam::Embedded].dev())
+ data->unicam_[Unicam::Embedded].dev()->bufferReady.disconnect();
+ }
+
+ /*
+ * Open all Unicam and ISP streams. The exception is the embedded data
+ * stream, which only gets opened below if the IPA reports that the sensor
+ * supports embedded data.
+ *
+ * The below grouping is just for convenience so that we can easily
+ * iterate over all streams in one go.
+ */
+ data->streams_.push_back(&data->unicam_[Unicam::Image]);
+ if (data->sensorMetadata_)
+ data->streams_.push_back(&data->unicam_[Unicam::Embedded]);
+
+ for (auto &stream : data->isp_)
+ data->streams_.push_back(&stream);
+
+ for (auto stream : data->streams_) {
+ int ret = stream->dev()->open();
+ if (ret)
+ return ret;
+ }
+
+ if (!data->unicam_[Unicam::Image].dev()->caps().hasMediaController()) {
+ LOG(RPI, Error) << "Unicam driver does not use the MediaController, please update your kernel!";
+ return -EINVAL;
+ }
+
+	/* Wire up all the IPA connections. */
+ data->ipa_->processStatsComplete.connect(data, &Vc4CameraData::processStatsComplete);
+ data->ipa_->prepareIspComplete.connect(data, &Vc4CameraData::prepareIspComplete);
+ data->ipa_->setIspControls.connect(data, &Vc4CameraData::setIspControls);
+ data->ipa_->setCameraTimeout.connect(data, &Vc4CameraData::setCameraTimeout);
+
+ /*
+ * List the available streams an application may request. At present, we
+ * do not advertise Unicam Embedded and ISP Statistics streams, as there
+ * is no mechanism for the application to request non-image buffer formats.
+ */
+ std::set<Stream *> streams;
+ streams.insert(&data->unicam_[Unicam::Image]);
+ streams.insert(&data->isp_[Isp::Output0]);
+ streams.insert(&data->isp_[Isp::Output1]);
+
+ /* Create and register the camera. */
+ const std::string &id = data->sensor_->id();
+ std::shared_ptr<Camera> camera =
+ Camera::create(std::move(cameraData), id, streams);
+ PipelineHandler::registerCamera(std::move(camera));
+
+ LOG(RPI, Info) << "Registered camera " << id
+ << " to Unicam device " << unicam->deviceNode()
+ << " and ISP device " << isp->deviceNode();
+
+ return 0;
+}
+
+CameraConfiguration::Status Vc4CameraData::platformValidate(RPi::RPiCameraConfiguration *rpiConfig) const
+{
+ std::vector<StreamParams> &rawStreams = rpiConfig->rawStreams_;
+ std::vector<StreamParams> &outStreams = rpiConfig->outStreams_;
+
+ CameraConfiguration::Status status = CameraConfiguration::Status::Valid;
+
+ /* Can only output 1 RAW stream, or 2 YUV/RGB streams. */
+ if (rawStreams.size() > 1 || outStreams.size() > 2) {
+ LOG(RPI, Error) << "Invalid number of streams requested";
+ return CameraConfiguration::Status::Invalid;
+ }
+
+ if (!rawStreams.empty()) {
+ rawStreams[0].dev = unicam_[Unicam::Image].dev();
+
+ /* Adjust the RAW stream to match the computed sensor format. */
+ StreamConfiguration *rawStream = rawStreams[0].cfg;
+ BayerFormat rawBayer = BayerFormat::fromPixelFormat(rawStream->pixelFormat);
+
+ /* Apply the sensor bitdepth. */
+ rawBayer.bitDepth = BayerFormat::fromMbusCode(rpiConfig->sensorFormat_.code).bitDepth;
+
+ /* Default to CSI2 packing if the user request is unsupported. */
+ if (rawBayer.packing != BayerFormat::Packing::CSI2 &&
+ rawBayer.packing != BayerFormat::Packing::None)
+ rawBayer.packing = BayerFormat::Packing::CSI2;
+
+ PixelFormat rawFormat = rawBayer.toPixelFormat();
+
+ /*
+ * Try for an unpacked format if a packed one wasn't available.
+ * This catches 8 (and 16) bit formats which would otherwise
+ * fail.
+ */
+ if (!rawFormat.isValid() && rawBayer.packing != BayerFormat::Packing::None) {
+ rawBayer.packing = BayerFormat::Packing::None;
+ rawFormat = rawBayer.toPixelFormat();
+ }
+
+ if (rawStream->pixelFormat != rawFormat ||
+ rawStream->size != rpiConfig->sensorFormat_.size) {
+ rawStream->pixelFormat = rawFormat;
+ rawStream->size = rpiConfig->sensorFormat_.size;
+
+ status = CameraConfiguration::Adjusted;
+ }
+
+ rawStreams[0].format =
+ RPi::PipelineHandlerBase::toV4L2DeviceFormat(unicam_[Unicam::Image].dev(), rawStream);
+ }
+
+ /*
+ * For the two ISP outputs, one stream must be equal or smaller than the
+ * other in all dimensions.
+ *
+ * Index 0 contains the largest requested resolution.
+ */
+ for (unsigned int i = 0; i < outStreams.size(); i++) {
+ Size size;
+
+ /*
+ * \todo Should we warn if upscaling, as it reduces the image
+ * quality and is usually undesired ?
+ */
+
+ size.width = std::min(outStreams[i].cfg->size.width,
+ outStreams[0].cfg->size.width);
+ size.height = std::min(outStreams[i].cfg->size.height,
+ outStreams[0].cfg->size.height);
+
+ if (outStreams[i].cfg->size != size) {
+ outStreams[i].cfg->size = size;
+ status = CameraConfiguration::Status::Adjusted;
+ }
+
+ /*
+ * Output 0 must be for the largest resolution. We will
+ * have that fixed up in the code above.
+ */
+ outStreams[i].dev = isp_[i == 0 ? Isp::Output0 : Isp::Output1].dev();
+
+ outStreams[i].format = RPi::PipelineHandlerBase::toV4L2DeviceFormat(outStreams[i].dev, outStreams[i].cfg);
+ }
+
+ return status;
+}
+
+int Vc4CameraData::platformPipelineConfigure(const std::unique_ptr<YamlObject> &root)
+{
+ config_ = {
+ .minUnicamBuffers = 2,
+ .minTotalUnicamBuffers = 4,
+ };
+
+ if (!root)
+ return 0;
+
+ std::optional<double> ver = (*root)["version"].get<double>();
+ if (!ver || *ver != 1.0) {
+ LOG(RPI, Error) << "Unexpected configuration file version reported";
+ return -EINVAL;
+ }
+
+ std::optional<std::string> target = (*root)["target"].get<std::string>();
+ if (!target || *target != "bcm2835") {
+ LOG(RPI, Error) << "Unexpected target reported: expected \"bcm2835\", got "
+ << *target;
+ return -EINVAL;
+ }
+
+ const YamlObject &phConfig = (*root)["pipeline_handler"];
+ config_.minUnicamBuffers =
+ phConfig["min_unicam_buffers"].get<unsigned int>(config_.minUnicamBuffers);
+ config_.minTotalUnicamBuffers =
+ phConfig["min_total_unicam_buffers"].get<unsigned int>(config_.minTotalUnicamBuffers);
+
+ if (config_.minTotalUnicamBuffers < config_.minUnicamBuffers) {
+ LOG(RPI, Error) << "Invalid configuration: min_total_unicam_buffers must be >= min_unicam_buffers";
+ return -EINVAL;
+ }
+
+ if (config_.minTotalUnicamBuffers < 1) {
+ LOG(RPI, Error) << "Invalid configuration: min_total_unicam_buffers must be >= 1";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int Vc4CameraData::platformConfigure(const RPi::RPiCameraConfiguration *rpiConfig)
+{
+ const std::vector<StreamParams> &rawStreams = rpiConfig->rawStreams_;
+ const std::vector<StreamParams> &outStreams = rpiConfig->outStreams_;
+ int ret;
+
+ V4L2VideoDevice *unicam = unicam_[Unicam::Image].dev();
+ V4L2DeviceFormat unicamFormat;
+
+ /*
+ * See which streams are requested, and route the user
+ * StreamConfiguration appropriately.
+ */
+ if (!rawStreams.empty()) {
+ rawStreams[0].cfg->setStream(&unicam_[Unicam::Image]);
+ unicam_[Unicam::Image].setFlags(StreamFlag::External);
+ unicamFormat = rawStreams[0].format;
+ } else {
+ unicamFormat =
+ RPi::PipelineHandlerBase::toV4L2DeviceFormat(unicam,
+ rpiConfig->sensorFormat_,
+ BayerFormat::Packing::CSI2);
+ }
+
+ ret = unicam->setFormat(&unicamFormat);
+ if (ret)
+ return ret;
+
+ ret = isp_[Isp::Input].dev()->setFormat(&unicamFormat);
+ if (ret)
+ return ret;
+
+ LOG(RPI, Info) << "Sensor: " << sensor_->id()
+ << " - Selected sensor format: " << rpiConfig->sensorFormat_
+ << " - Selected unicam format: " << unicamFormat;
+
+ /* Use a sensible small default size if no output streams are configured. */
+ Size maxSize = outStreams.empty() ? Size(320, 240) : outStreams[0].cfg->size;
+ V4L2DeviceFormat format;
+
+ for (unsigned int i = 0; i < outStreams.size(); i++) {
+ StreamConfiguration *cfg = outStreams[i].cfg;
+
+ /* The largest resolution gets routed to the ISP Output 0 node. */
+ RPi::Stream *stream = i == 0 ? &isp_[Isp::Output0] : &isp_[Isp::Output1];
+ format = outStreams[i].format;
+
+ LOG(RPI, Debug) << "Setting " << stream->name() << " to "
+ << format;
+
+ ret = stream->dev()->setFormat(&format);
+ if (ret)
+ return -EINVAL;
+
+ LOG(RPI, Debug)
+ << "Stream " << stream->name() << " has color space "
+ << ColorSpace::toString(cfg->colorSpace);
+
+ cfg->setStream(stream);
+ stream->setFlags(StreamFlag::External);
+ }
+
+ ispOutputTotal_ = outStreams.size();
+
+ /*
+ * If ISP::Output0 stream has not been configured by the application,
+ * we must allow the hardware to generate an output so that the data
+ * flow in the pipeline handler remains consistent, and we still generate
+ * statistics for the IPA to use. So enable the output at a very low
+ * resolution for internal use.
+ *
+ * \todo Allow the pipeline to work correctly without Output0 and only
+ * statistics coming from the hardware.
+ */
+ if (outStreams.empty()) {
+ V4L2VideoDevice *dev = isp_[Isp::Output0].dev();
+
+ format = {};
+ format.size = maxSize;
+ format.fourcc = dev->toV4L2PixelFormat(formats::YUV420);
+ /* No one asked for output, so the color space doesn't matter. */
+ format.colorSpace = ColorSpace::Sycc;
+ ret = dev->setFormat(&format);
+ if (ret) {
+ LOG(RPI, Error)
+ << "Failed to set default format on ISP Output0: "
+ << ret;
+ return -EINVAL;
+ }
+
+ ispOutputTotal_++;
+
+ LOG(RPI, Debug) << "Defaulting ISP Output0 format to "
+ << format;
+ }
+
+ /*
+ * If ISP::Output1 stream has not been requested by the application, we
+ * set it up for internal use now. This second stream will be used for
+ * fast colour denoise, and must be a quarter resolution of the ISP::Output0
+ * stream. However, also limit the maximum size to 1200 pixels in the
+ * larger dimension, just to avoid being wasteful with buffer allocations
+ * and memory bandwidth.
+ *
+ * \todo If Output 1 format is not YUV420, Output 1 ought to be disabled as
+ * colour denoise will not run.
+ */
+ if (outStreams.size() <= 1) {
+ V4L2VideoDevice *dev = isp_[Isp::Output1].dev();
+
+ V4L2DeviceFormat output1Format;
+ constexpr Size maxDimensions(1200, 1200);
+ const Size limit = maxDimensions.boundedToAspectRatio(format.size);
+
+ output1Format.size = (format.size / 2).boundedTo(limit).alignedDownTo(2, 2);
+ output1Format.colorSpace = format.colorSpace;
+ output1Format.fourcc = dev->toV4L2PixelFormat(formats::YUV420);
+
+ LOG(RPI, Debug) << "Setting ISP Output1 (internal) to "
+ << output1Format;
+
+ ret = dev->setFormat(&output1Format);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to set format on ISP Output1: "
+ << ret;
+ return -EINVAL;
+ }
+
+ ispOutputTotal_++;
+ }
+
+ /* ISP statistics output format. */
+ format = {};
+ format.fourcc = V4L2PixelFormat(V4L2_META_FMT_BCM2835_ISP_STATS);
+ ret = isp_[Isp::Stats].dev()->setFormat(&format);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to set format on ISP stats stream: "
+ << format;
+ return ret;
+ }
+
+ ispOutputTotal_++;
+
+ /*
+ * Configure the Unicam embedded data output format only if the sensor
+ * supports it.
+ */
+ if (sensorMetadata_) {
+ V4L2SubdeviceFormat embeddedFormat;
+
+ sensor_->device()->getFormat(1, &embeddedFormat);
+ format = {};
+ format.fourcc = V4L2PixelFormat(V4L2_META_FMT_SENSOR_DATA);
+ format.planes[0].size = embeddedFormat.size.width * embeddedFormat.size.height;
+
+ LOG(RPI, Debug) << "Setting embedded data format " << format.toString();
+ ret = unicam_[Unicam::Embedded].dev()->setFormat(&format);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to set format on Unicam embedded: "
+ << format;
+ return ret;
+ }
+ }
+
+ /* Figure out the smallest selection the ISP will allow. */
+ Rectangle testCrop(0, 0, 1, 1);
+ isp_[Isp::Input].dev()->setSelection(V4L2_SEL_TGT_CROP, &testCrop);
+ ispMinCropSize_ = testCrop.size();
+
+ /* Adjust aspect ratio by providing crops on the input image. */
+ Size size = unicamFormat.size.boundedToAspectRatio(maxSize);
+ ispCrop_ = size.centeredTo(Rectangle(unicamFormat.size).center());
+
+ platformSetIspCrop();
+
+ return 0;
+}
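A worked example of the internal Output1 sizing above, assuming Output0 was configured as 1920x1080 YUV420:

/*
 *   limit = Size(1200, 1200).boundedToAspectRatio(1920x1080) = 1200x675
 *   size  = (1920x1080 / 2).boundedTo(1200x675)              = 960x540
 *   size.alignedDownTo(2, 2)                                 = 960x540
 *
 * i.e. Output1 runs at half the Output0 width and height, and only for
 * very large Output0 sizes does the 1200-pixel cap take effect.
 */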
+
+int Vc4CameraData::platformConfigureIpa(ipa::RPi::ConfigParams &params)
+{
+ params.ispControls = isp_[Isp::Input].dev()->controls();
+
+ /* Allocate the lens shading table via dmaHeap and pass to the IPA. */
+ if (!lsTable_.isValid()) {
+ lsTable_ = SharedFD(dmaHeap_.alloc("ls_grid", ipa::RPi::MaxLsGridSize));
+ if (!lsTable_.isValid())
+ return -ENOMEM;
+
+ /* Allow the IPA to mmap the LS table via the file descriptor. */
+ /*
+ * \todo Investigate if mapping the lens shading table buffer
+ * could be handled with mapBuffers().
+ */
+ params.lsTableHandle = lsTable_;
+ }
+
+ return 0;
+}
+
+void Vc4CameraData::platformStart()
+{
+}
+
+void Vc4CameraData::platformStop()
+{
+ bayerQueue_ = {};
+ embeddedQueue_ = {};
+}
+
+void Vc4CameraData::unicamBufferDequeue(FrameBuffer *buffer)
+{
+ RPi::Stream *stream = nullptr;
+ unsigned int index;
+
+ if (!isRunning())
+ return;
+
+ for (RPi::Stream &s : unicam_) {
+ index = s.getBufferId(buffer);
+ if (index) {
+ stream = &s;
+ break;
+ }
+ }
+
+ /* The buffer must belong to one of our streams. */
+ ASSERT(stream);
+
+ LOG(RPI, Debug) << "Stream " << stream->name() << " buffer dequeue"
+ << ", buffer id " << index
+ << ", timestamp: " << buffer->metadata().timestamp;
+
+ if (stream == &unicam_[Unicam::Image]) {
+ /*
+ * Lookup the sensor controls used for this frame sequence from
+ * DelayedControl and queue them along with the frame buffer.
+ */
+ auto [ctrl, delayContext] = delayedCtrls_->get(buffer->metadata().sequence);
+ /*
+ * Add the frame timestamp to the ControlList for the IPA to use
+ * as it does not receive the FrameBuffer object.
+ */
+ ctrl.set(controls::SensorTimestamp, buffer->metadata().timestamp);
+ bayerQueue_.push({ buffer, std::move(ctrl), delayContext });
+ } else {
+ embeddedQueue_.push(buffer);
+ }
+
+ handleState();
+}
+
+void Vc4CameraData::ispInputDequeue(FrameBuffer *buffer)
+{
+ if (!isRunning())
+ return;
+
+ LOG(RPI, Debug) << "Stream ISP Input buffer complete"
+ << ", buffer id " << unicam_[Unicam::Image].getBufferId(buffer)
+ << ", timestamp: " << buffer->metadata().timestamp;
+
+ /* The ISP input buffer gets re-queued into Unicam. */
+ handleStreamBuffer(buffer, &unicam_[Unicam::Image]);
+ handleState();
+}
+
+void Vc4CameraData::ispOutputDequeue(FrameBuffer *buffer)
+{
+ RPi::Stream *stream = nullptr;
+ unsigned int index;
+
+ if (!isRunning())
+ return;
+
+ for (RPi::Stream &s : isp_) {
+ index = s.getBufferId(buffer);
+ if (index) {
+ stream = &s;
+ break;
+ }
+ }
+
+ /* The buffer must belong to one of our ISP output streams. */
+ ASSERT(stream);
+
+ LOG(RPI, Debug) << "Stream " << stream->name() << " buffer complete"
+ << ", buffer id " << index
+ << ", timestamp: " << buffer->metadata().timestamp;
+
+ /*
+ * ISP statistics buffer must not be re-queued or sent back to the
+ * application until after the IPA signals so.
+ */
+ if (stream == &isp_[Isp::Stats]) {
+ ipa::RPi::ProcessParams params;
+ params.buffers.stats = index | RPi::MaskStats;
+ params.ipaContext = requestQueue_.front()->sequence();
+ ipa_->processStats(params);
+ } else {
+ /* Any other ISP output can be handed back to the application now. */
+ handleStreamBuffer(buffer, stream);
+ }
+
+ /*
+ * Increment the number of ISP outputs generated.
+ * This is needed to track dropped frames.
+ */
+ ispOutputCount_++;
+
+ handleState();
+}
+
+void Vc4CameraData::processStatsComplete(const ipa::RPi::BufferIds &buffers)
+{
+ if (!isRunning())
+ return;
+
+ FrameBuffer *buffer = isp_[Isp::Stats].getBuffers().at(buffers.stats & RPi::MaskID).buffer;
+
+ handleStreamBuffer(buffer, &isp_[Isp::Stats]);
+
+ state_ = State::IpaComplete;
+ handleState();
+}
+
+void Vc4CameraData::prepareIspComplete(const ipa::RPi::BufferIds &buffers,
+ [[maybe_unused]] bool stitchSwapBuffers)
+{
+ unsigned int embeddedId = buffers.embedded & RPi::MaskID;
+ unsigned int bayer = buffers.bayer & RPi::MaskID;
+ FrameBuffer *buffer;
+
+ if (!isRunning())
+ return;
+
+ buffer = unicam_[Unicam::Image].getBuffers().at(bayer & RPi::MaskID).buffer;
+ LOG(RPI, Debug) << "Input re-queue to ISP, buffer id " << (bayer & RPi::MaskID)
+ << ", timestamp: " << buffer->metadata().timestamp;
+
+ isp_[Isp::Input].queueBuffer(buffer);
+ ispOutputCount_ = 0;
+
+ if (sensorMetadata_ && embeddedId) {
+ buffer = unicam_[Unicam::Embedded].getBuffers().at(embeddedId & RPi::MaskID).buffer;
+ handleStreamBuffer(buffer, &unicam_[Unicam::Embedded]);
+ }
+
+ handleState();
+}
+
+void Vc4CameraData::setIspControls(const ControlList &controls)
+{
+ ControlList ctrls = controls;
+
+ if (ctrls.contains(V4L2_CID_USER_BCM2835_ISP_LENS_SHADING)) {
+ ControlValue &value =
+ const_cast<ControlValue &>(ctrls.get(V4L2_CID_USER_BCM2835_ISP_LENS_SHADING));
+ Span<uint8_t> s = value.data();
+ bcm2835_isp_lens_shading *ls =
+ reinterpret_cast<bcm2835_isp_lens_shading *>(s.data());
+ ls->dmabuf = lsTable_.get();
+ }
+
+ isp_[Isp::Input].dev()->setControls(&ctrls);
+ handleState();
+}
+
+void Vc4CameraData::setCameraTimeout(uint32_t maxFrameLengthMs)
+{
+ /*
+ * Set the dequeue timeout to 5x the maximum frame length advertised
+ * by the IPA over a number of frames, allowing a minimum timeout
+ * value of 1s.
+ */
+ utils::Duration timeout =
+ std::max<utils::Duration>(1s, 5 * maxFrameLengthMs * 1ms);
+
+ LOG(RPI, Debug) << "Setting Unicam timeout to " << timeout;
+ unicam_[Unicam::Image].dev()->setDequeueTimeout(timeout);
+}
+
+void Vc4CameraData::tryRunPipeline()
+{
+ FrameBuffer *embeddedBuffer;
+ BayerFrame bayerFrame;
+
+ /* If any of our request or buffer queues are empty, we cannot proceed. */
+ if (state_ != State::Idle || requestQueue_.empty() ||
+ bayerQueue_.empty() || (embeddedQueue_.empty() && sensorMetadata_))
+ return;
+
+ if (!findMatchingBuffers(bayerFrame, embeddedBuffer))
+ return;
+
+ /* Take the first request from the queue and action the IPA. */
+ Request *request = requestQueue_.front();
+
+ /* See if a new ScalerCrop value needs to be applied. */
+ applyScalerCrop(request->controls());
+
+ /*
+ * Clear the request metadata and fill it with some initial non-IPA
+ * related controls. We clear it first because the request metadata
+ * may have been populated if we have dropped the previous frame.
+ */
+ request->metadata().clear();
+ fillRequestMetadata(bayerFrame.controls, request);
+
+ /* Set our state to say the pipeline is active. */
+ state_ = State::Busy;
+
+ unsigned int bayer = unicam_[Unicam::Image].getBufferId(bayerFrame.buffer);
+
+ LOG(RPI, Debug) << "Signalling prepareIsp:"
+ << " Bayer buffer id: " << bayer;
+
+ ipa::RPi::PrepareParams params;
+ params.buffers.bayer = RPi::MaskBayerData | bayer;
+ params.sensorControls = std::move(bayerFrame.controls);
+ params.requestControls = request->controls();
+ params.ipaContext = request->sequence();
+ params.delayContext = bayerFrame.delayContext;
+ params.buffers.embedded = 0;
+
+ if (embeddedBuffer) {
+ unsigned int embeddedId = unicam_[Unicam::Embedded].getBufferId(embeddedBuffer);
+
+ params.buffers.embedded = RPi::MaskEmbeddedData | embeddedId;
+ LOG(RPI, Debug) << "Signalling prepareIsp:"
+ << " Embedded buffer id: " << embeddedId;
+ }
+
+ ipa_->prepareIsp(params);
+}
+
+bool Vc4CameraData::findMatchingBuffers(BayerFrame &bayerFrame, FrameBuffer *&embeddedBuffer)
+{
+ if (bayerQueue_.empty())
+ return false;
+
+ /*
+ * Find the embedded data buffer with a matching timestamp to pass to
+ * the IPA. Any embedded buffers with a timestamp lower than the
+ * current bayer buffer will be removed and re-queued to the driver.
+ */
+ uint64_t ts = bayerQueue_.front().buffer->metadata().timestamp;
+ embeddedBuffer = nullptr;
+ while (!embeddedQueue_.empty()) {
+ FrameBuffer *b = embeddedQueue_.front();
+ if (b->metadata().timestamp < ts) {
+ embeddedQueue_.pop();
+ unicam_[Unicam::Embedded].returnBuffer(b);
+ LOG(RPI, Debug) << "Dropping unmatched input frame in stream "
+ << unicam_[Unicam::Embedded].name();
+ } else if (b->metadata().timestamp == ts) {
+ /* Found a match! */
+ embeddedBuffer = b;
+ embeddedQueue_.pop();
+ break;
+ } else {
+ break; /* Only higher timestamps from here. */
+ }
+ }
+
+ if (!embeddedBuffer && sensorMetadata_) {
+ if (embeddedQueue_.empty()) {
+ /*
+ * If the embedded buffer queue is empty, wait for the next
+ * buffer to arrive - dequeue ordering may send the image
+ * buffer first.
+ */
+ LOG(RPI, Debug) << "Waiting for next embedded buffer.";
+ return false;
+ }
+
+ /* Log if there is no matching embedded data buffer found. */
+ LOG(RPI, Debug) << "Returning bayer frame without a matching embedded buffer.";
+ }
+
+ bayerFrame = std::move(bayerQueue_.front());
+ bayerQueue_.pop();
+
+ return true;
+}
+
+REGISTER_PIPELINE_HANDLER(PipelineHandlerVc4, "rpi/vc4")
+
+} /* namespace libcamera */
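
The findMatchingBuffers() logic added above pairs each bayer frame with the
embedded-data buffer carrying the same sensor timestamp; older embedded
buffers encountered along the way are dropped (and, in the pipeline, returned
to the driver). The following is a minimal standalone sketch of that matching
policy only, using simplified stand-in types rather than the RPi pipeline
classes:

    #include <cstdint>
    #include <optional>
    #include <queue>

    struct Buffer {
        uint64_t timestamp;
    };

    /*
     * Find the embedded buffer matching the given bayer timestamp.
     * Embedded buffers older than the bayer frame are discarded (the real
     * pipeline also re-queues them to Unicam); if only newer buffers
     * remain, give up without consuming them.
     */
    std::optional<Buffer> matchEmbedded(uint64_t bayerTimestamp,
                                        std::queue<Buffer> &embeddedQueue)
    {
        while (!embeddedQueue.empty()) {
            Buffer b = embeddedQueue.front();
            if (b.timestamp < bayerTimestamp) {
                embeddedQueue.pop();    /* Stale, drop and keep looking. */
            } else if (b.timestamp == bayerTimestamp) {
                embeddedQueue.pop();    /* Exact timestamp match. */
                return b;
            } else {
                break;                  /* Only newer timestamps remain. */
            }
        }
        return std::nullopt;
    }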
diff --git a/src/libcamera/pipeline/simple/converter.h b/src/libcamera/pipeline/simple/converter.h
deleted file mode 100644
index f0ebe2e0..00000000
--- a/src/libcamera/pipeline/simple/converter.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2020, Laurent Pinchart
- *
- * converter.h - Format converter for simple pipeline handler
- */
-
-#pragma once
-
-#include <functional>
-#include <map>
-#include <memory>
-#include <string>
-#include <tuple>
-#include <vector>
-
-#include <libcamera/pixel_format.h>
-
-#include <libcamera/base/log.h>
-#include <libcamera/base/signal.h>
-
-namespace libcamera {
-
-class FrameBuffer;
-class MediaDevice;
-class Size;
-class SizeRange;
-struct StreamConfiguration;
-class V4L2M2MDevice;
-
-class SimpleConverter
-{
-public:
- SimpleConverter(MediaDevice *media);
-
- bool isValid() const { return m2m_ != nullptr; }
-
- std::vector<PixelFormat> formats(PixelFormat input);
- SizeRange sizes(const Size &input);
-
- std::tuple<unsigned int, unsigned int>
- strideAndFrameSize(const PixelFormat &pixelFormat, const Size &size);
-
- int configure(const StreamConfiguration &inputCfg,
- const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfg);
- int exportBuffers(unsigned int ouput, unsigned int count,
- std::vector<std::unique_ptr<FrameBuffer>> *buffers);
-
- int start();
- void stop();
-
- int queueBuffers(FrameBuffer *input,
- const std::map<unsigned int, FrameBuffer *> &outputs);
-
- Signal<FrameBuffer *> inputBufferReady;
- Signal<FrameBuffer *> outputBufferReady;
-
-private:
- class Stream : protected Loggable
- {
- public:
- Stream(SimpleConverter *converter, unsigned int index);
-
- bool isValid() const { return m2m_ != nullptr; }
-
- int configure(const StreamConfiguration &inputCfg,
- const StreamConfiguration &outputCfg);
- int exportBuffers(unsigned int count,
- std::vector<std::unique_ptr<FrameBuffer>> *buffers);
-
- int start();
- void stop();
-
- int queueBuffers(FrameBuffer *input, FrameBuffer *output);
-
- protected:
- std::string logPrefix() const override;
-
- private:
- void captureBufferReady(FrameBuffer *buffer);
- void outputBufferReady(FrameBuffer *buffer);
-
- SimpleConverter *converter_;
- unsigned int index_;
- std::unique_ptr<V4L2M2MDevice> m2m_;
-
- unsigned int inputBufferCount_;
- unsigned int outputBufferCount_;
- };
-
- std::string deviceNode_;
- std::unique_ptr<V4L2M2MDevice> m2m_;
-
- std::vector<Stream> streams_;
- std::map<FrameBuffer *, unsigned int> queue_;
-};
-
-} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/simple/meson.build b/src/libcamera/pipeline/simple/meson.build
index 9c99b32f..42b0896d 100644
--- a/src/libcamera/pipeline/simple/meson.build
+++ b/src/libcamera/pipeline/simple/meson.build
@@ -1,6 +1,5 @@
# SPDX-License-Identifier: CC0-1.0
libcamera_sources += files([
- 'converter.cpp',
'simple.cpp',
])
diff --git a/src/libcamera/pipeline/simple/simple.cpp b/src/libcamera/pipeline/simple/simple.cpp
index bc0cb1a0..db3575c3 100644
--- a/src/libcamera/pipeline/simple/simple.cpp
+++ b/src/libcamera/pipeline/simple/simple.cpp
@@ -3,7 +3,7 @@
* Copyright (C) 2020, Laurent Pinchart
* Copyright (C) 2019, Martijn Braam
*
- * simple.cpp - Pipeline handler for simple pipelines
+ * Pipeline handler for simple pipelines
*/
#include <algorithm>
@@ -30,13 +30,14 @@
#include "libcamera/internal/camera.h"
#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/converter.h"
#include "libcamera/internal/device_enumerator.h"
#include "libcamera/internal/media_device.h"
#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/software_isp/software_isp.h"
#include "libcamera/internal/v4l2_subdevice.h"
#include "libcamera/internal/v4l2_videodevice.h"
-#include "converter.h"
namespace libcamera {
@@ -100,8 +101,14 @@ LOG_DEFINE_CATEGORY(SimplePipeline)
*
* During the breadth-first search, the pipeline is traversed from entity to
* entity, by following media graph links from source to sink, starting at the
- * camera sensor. When reaching an entity (on its sink side), all its source
- * pads are considered to continue the graph traversal.
+ * camera sensor.
+ *
+ * When reaching an entity (on its sink side), if the entity is a V4L2 subdev
+ * that supports the streams API, the subdev internal routes are followed to
+ * find the connected source pads. Otherwise all of the entity's source pads
+ * are considered to continue the graph traversal. The pipeline handler
+ * currently considers the default internal routes only and doesn't attempt to
+ * set up custom routes. This can be extended if needed.
*
* The shortest path between the camera sensor and a video node is stored in
* SimpleCameraData::entities_ as a list of SimpleCameraData::Entity structures,
@@ -179,14 +186,24 @@ struct SimplePipelineInfo {
* and the number of streams it supports.
*/
std::vector<std::pair<const char *, unsigned int>> converters;
+ /*
+ * Use of the Software ISP is enabled on a per-driver basis.
+ *
+ * The Software ISP can't be used together with the converters.
+ */
+ bool swIspEnabled;
};
namespace {
static const SimplePipelineInfo supportedDevices[] = {
- { "imx7-csi", { { "pxp", 1 } } },
- { "qcom-camss", {} },
- { "sun6i-csi", {} },
+ { "dcmipp", {}, false },
+ { "imx7-csi", { { "pxp", 1 } }, false },
+ { "j721e-csi2rx", {}, false },
+ { "mtk-seninf", { { "mtk-mdp", 3 } }, false },
+ { "mxc-isi", {}, false },
+ { "qcom-camss", {}, true },
+ { "sun6i-csi", {}, false },
};
} /* namespace */
@@ -204,7 +221,8 @@ public:
int init();
int setupLinks();
int setupFormats(V4L2SubdeviceFormat *format,
- V4L2Subdevice::Whence whence);
+ V4L2Subdevice::Whence whence,
+ Transform transform = Transform::Identity);
void bufferReady(FrameBuffer *buffer);
unsigned int streamIndex(const Stream *stream) const
@@ -216,6 +234,11 @@ public:
/* The media entity, always valid. */
MediaEntity *entity;
/*
+ * Whether or not the entity is a subdev that supports the
+ * routing API.
+ */
+ bool supportsRouting;
+ /*
* The local sink pad connected to the upstream entity, null for
* the camera sensor at the beginning of the pipeline.
*/
@@ -254,16 +277,22 @@ public:
std::vector<Configuration> configs_;
std::map<PixelFormat, std::vector<const Configuration *>> formats_;
- std::unique_ptr<SimpleConverter> converter_;
- std::vector<std::unique_ptr<FrameBuffer>> converterBuffers_;
- bool useConverter_;
- std::queue<std::map<unsigned int, FrameBuffer *>> converterQueue_;
+ std::vector<std::unique_ptr<FrameBuffer>> conversionBuffers_;
+ std::queue<std::map<unsigned int, FrameBuffer *>> conversionQueue_;
+ bool useConversion_;
+
+ std::unique_ptr<Converter> converter_;
+ std::unique_ptr<SoftwareIsp> swIsp_;
private:
void tryPipeline(unsigned int code, const Size &size);
+ static std::vector<const MediaPad *> routedSourcePads(MediaPad *sink);
- void converterInputDone(FrameBuffer *buffer);
- void converterOutputDone(FrameBuffer *buffer);
+ void conversionInputDone(FrameBuffer *buffer);
+ void conversionOutputDone(FrameBuffer *buffer);
+
+ void ispStatsReady();
+ void setSensorControls(const ControlList &sensorControls);
};
class SimpleCameraConfiguration : public CameraConfiguration
@@ -279,6 +308,7 @@ public:
}
bool needConversion() const { return needConversion_; }
+ const Transform &combinedTransform() const { return combinedTransform_; }
private:
/*
@@ -291,6 +321,7 @@ private:
const SimpleCameraData::Configuration *pipeConfig_;
bool needConversion_;
+ Transform combinedTransform_;
};
class SimplePipelineHandler : public PipelineHandler
@@ -298,8 +329,8 @@ class SimplePipelineHandler : public PipelineHandler
public:
SimplePipelineHandler(CameraManager *manager);
- CameraConfiguration *generateConfiguration(Camera *camera,
- const StreamRoles &roles) override;
+ std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles) override;
int configure(Camera *camera, CameraConfiguration *config) override;
int exportFrameBuffers(Camera *camera, Stream *stream,
@@ -313,6 +344,7 @@ public:
V4L2VideoDevice *video(const MediaEntity *entity);
V4L2Subdevice *subdev(const MediaEntity *entity);
MediaDevice *converter() { return converter_; }
+ bool swIspEnabled() const { return swIspEnabled_; }
protected:
int queueRequestDevice(Camera *camera, Request *request) override;
@@ -332,6 +364,7 @@ private:
}
std::vector<MediaEntity *> locateSensors();
+ static int resetRoutingTable(V4L2Subdevice *subdev);
const MediaPad *acquirePipeline(SimpleCameraData *data);
void releasePipeline(SimpleCameraData *data);
@@ -340,6 +373,7 @@ private:
std::map<const MediaEntity *, EntityData> entities_;
MediaDevice *converter_;
+ bool swIspEnabled_;
};
/* -----------------------------------------------------------------------------
@@ -386,17 +420,40 @@ SimpleCameraData::SimpleCameraData(SimplePipelineHandler *pipe,
break;
}
- /* The actual breadth-first search algorithm. */
visited.insert(entity);
- for (MediaPad *pad : entity->pads()) {
- if (!(pad->flags() & MEDIA_PAD_FL_SOURCE))
- continue;
+ /*
+ * Add direct downstream entities to the search queue. If the
+ * current entity supports the subdev internal routing API,
+ * restrict the search to downstream entities reachable through
+ * active routes.
+ */
+
+ std::vector<const MediaPad *> pads;
+ bool supportsRouting = false;
+
+ if (sinkPad) {
+ pads = routedSourcePads(sinkPad);
+ if (!pads.empty())
+ supportsRouting = true;
+ }
+
+ if (pads.empty()) {
+ for (const MediaPad *pad : entity->pads()) {
+ if (!(pad->flags() & MEDIA_PAD_FL_SOURCE))
+ continue;
+ pads.push_back(pad);
+ }
+ }
+
+ for (const MediaPad *pad : pads) {
for (MediaLink *link : pad->links()) {
MediaEntity *next = link->sink()->entity();
if (visited.find(next) == visited.end()) {
queue.push({ next, link->sink() });
- parents.insert({ next, { entity, sinkPad, pad, link } });
+
+ Entity e{ entity, supportsRouting, sinkPad, pad, link };
+ parents.insert({ next, e });
}
}
}
@@ -410,7 +467,7 @@ SimpleCameraData::SimpleCameraData(SimplePipelineHandler *pipe,
* to the sensor. Store all the entities in the pipeline, from the
* camera sensor to the video node, in entities_.
*/
- entities_.push_front({ entity, sinkPad, nullptr, nullptr });
+ entities_.push_front({ entity, false, sinkPad, nullptr, nullptr });
for (auto it = parents.find(entity); it != parents.end();
it = parents.find(entity)) {
@@ -455,14 +512,45 @@ int SimpleCameraData::init()
/* Open the converter, if any. */
MediaDevice *converter = pipe->converter();
if (converter) {
- converter_ = std::make_unique<SimpleConverter>(converter);
- if (!converter_->isValid()) {
+ converter_ = ConverterFactoryBase::create(converter);
+ if (!converter_) {
LOG(SimplePipeline, Warning)
<< "Failed to create converter, disabling format conversion";
converter_.reset();
} else {
- converter_->inputBufferReady.connect(this, &SimpleCameraData::converterInputDone);
- converter_->outputBufferReady.connect(this, &SimpleCameraData::converterOutputDone);
+ converter_->inputBufferReady.connect(this, &SimpleCameraData::conversionInputDone);
+ converter_->outputBufferReady.connect(this, &SimpleCameraData::conversionOutputDone);
+ }
+ }
+
+ /*
+ * Instantiate the Software ISP if it is enabled for the given driver and no converter is used.
+ */
+ if (!converter_ && pipe->swIspEnabled()) {
+ swIsp_ = std::make_unique<SoftwareIsp>(pipe, sensor_.get());
+ if (!swIsp_->isValid()) {
+ LOG(SimplePipeline, Warning)
+ << "Failed to create software ISP, disabling software debayering";
+ swIsp_.reset();
+ } else {
+ /*
+ * The inputBufferReady signal is emitted from the soft ISP thread,
+ * and needs to be handled in the pipeline handler thread. Signals
+ * implement queued delivery, but this works transparently only if
+ * the receiver is bound to the target thread. As the
+ * SimpleCameraData class doesn't inherit from the Object class, it
+ * is not bound to any thread, and the signal would be delivered
+ * synchronously. Instead, connect the signal to a lambda function
+ * bound explicitly to the pipe, which is bound to the pipeline
+ * handler thread. The function then simply forwards the call to
+ * conversionInputDone().
+ */
+ swIsp_->inputBufferReady.connect(pipe, [this](FrameBuffer *buffer) {
+ this->conversionInputDone(buffer);
+ });
+ swIsp_->outputBufferReady.connect(this, &SimpleCameraData::conversionOutputDone);
+ swIsp_->ispStatsReady.connect(this, &SimpleCameraData::ispStatsReady);
+ swIsp_->setSensorControls.connect(this, &SimpleCameraData::setSensorControls);
}
}
@@ -520,13 +608,13 @@ void SimpleCameraData::tryPipeline(unsigned int code, const Size &size)
* corresponding possible V4L2 pixel formats on the video node.
*/
V4L2SubdeviceFormat format{};
- format.mbus_code = code;
+ format.code = code;
format.size = size;
int ret = setupFormats(&format, V4L2Subdevice::TryFormat);
if (ret < 0) {
/* Pipeline configuration failed, skip this configuration. */
- format.mbus_code = code;
+ format.code = code;
format.size = size;
LOG(SimplePipeline, Debug)
<< "Sensor format " << format
@@ -534,7 +622,7 @@ void SimpleCameraData::tryPipeline(unsigned int code, const Size &size)
return;
}
- V4L2VideoDevice::Formats videoFormats = video_->formats(format.mbus_code);
+ V4L2VideoDevice::Formats videoFormats = video_->formats(format.code);
LOG(SimplePipeline, Debug)
<< "Adding configuration for " << format.size
@@ -556,12 +644,20 @@ void SimpleCameraData::tryPipeline(unsigned int code, const Size &size)
config.captureFormat = pixelFormat;
config.captureSize = format.size;
- if (!converter_) {
- config.outputFormats = { pixelFormat };
- config.outputSizes = config.captureSize;
- } else {
+ if (converter_) {
config.outputFormats = converter_->formats(pixelFormat);
config.outputSizes = converter_->sizes(format.size);
+ } else if (swIsp_) {
+ config.outputFormats = swIsp_->formats(pixelFormat);
+ config.outputSizes = swIsp_->sizes(pixelFormat, format.size);
+ if (config.outputFormats.empty()) {
+ /* Do not use swIsp for unsupported pixel formats. */
+ config.outputFormats = { pixelFormat };
+ config.outputSizes = config.captureSize;
+ }
+ } else {
+ config.outputFormats = { pixelFormat };
+ config.outputSizes = config.captureSize;
}
configs_.push_back(config);
@@ -577,15 +673,32 @@ int SimpleCameraData::setupLinks()
* multiple sink links to be enabled together, even on different sink
* pads. We must thus start by disabling all sink links (but the one we
* want to enable) before enabling the pipeline link.
+ *
+ * The entities_ list stores entities along with their source link. We
+ * need to process the link in the context of the sink entity, so
+ * record the source link of the current entity as the sink link of the
+ * next entity, and skip the first entity in the loop.
*/
+ MediaLink *sinkLink = nullptr;
+
for (SimpleCameraData::Entity &e : entities_) {
- if (!e.sourceLink)
- break;
+ if (!sinkLink) {
+ sinkLink = e.sourceLink;
+ continue;
+ }
+
+ for (MediaPad *pad : e.entity->pads()) {
+ /*
+ * If the entity supports the V4L2 internal routing API,
+ * assume that it may carry multiple independent streams
+ * concurrently, and only disable links on the sink and
+ * source pads used by the pipeline.
+ */
+ if (e.supportsRouting && pad != e.sink && pad != e.source)
+ continue;
- MediaEntity *remote = e.sourceLink->sink()->entity();
- for (MediaPad *pad : remote->pads()) {
for (MediaLink *link : pad->links()) {
- if (link == e.sourceLink)
+ if (link == sinkLink)
continue;
if ((link->flags() & MEDIA_LNK_FL_ENABLED) &&
@@ -597,18 +710,21 @@ int SimpleCameraData::setupLinks()
}
}
- if (!(e.sourceLink->flags() & MEDIA_LNK_FL_ENABLED)) {
- ret = e.sourceLink->setEnabled(true);
+ if (!(sinkLink->flags() & MEDIA_LNK_FL_ENABLED)) {
+ ret = sinkLink->setEnabled(true);
if (ret < 0)
return ret;
}
+
+ sinkLink = e.sourceLink;
}
return 0;
}
int SimpleCameraData::setupFormats(V4L2SubdeviceFormat *format,
- V4L2Subdevice::Whence whence)
+ V4L2Subdevice::Whence whence,
+ Transform transform)
{
SimplePipelineHandler *pipe = SimpleCameraData::pipe();
int ret;
@@ -617,7 +733,7 @@ int SimpleCameraData::setupFormats(V4L2SubdeviceFormat *format,
* Configure the format on the sensor output and propagate it through
* the pipeline.
*/
- ret = sensor_->setFormat(format);
+ ret = sensor_->setFormat(format, transform);
if (ret < 0)
return ret;
@@ -644,7 +760,7 @@ int SimpleCameraData::setupFormats(V4L2SubdeviceFormat *format,
if (ret < 0)
return ret;
- if (format->mbus_code != sourceFormat.mbus_code ||
+ if (format->code != sourceFormat.code ||
format->size != sourceFormat.size) {
LOG(SimplePipeline, Debug)
<< "Source '" << source->entity()->name()
@@ -678,7 +794,7 @@ void SimpleCameraData::bufferReady(FrameBuffer *buffer)
* point converting an erroneous buffer.
*/
if (buffer->metadata().status != FrameMetadata::FrameSuccess) {
- if (!useConverter_) {
+ if (!useConversion_) {
/* No conversion, just complete the request. */
Request *request = buffer->request();
pipe->completeBuffer(request, buffer);
@@ -687,23 +803,23 @@ void SimpleCameraData::bufferReady(FrameBuffer *buffer)
}
/*
- * The converter is in use. Requeue the internal buffer for
- * capture (unless the stream is being stopped), and complete
- * the request with all the user-facing buffers.
+ * The converter or Software ISP is in use. Requeue the internal
+ * buffer for capture (unless the stream is being stopped), and
+ * complete the request with all the user-facing buffers.
*/
if (buffer->metadata().status != FrameMetadata::FrameCancelled)
video_->queueBuffer(buffer);
- if (converterQueue_.empty())
+ if (conversionQueue_.empty())
return;
Request *request = nullptr;
- for (auto &item : converterQueue_.front()) {
+ for (auto &item : conversionQueue_.front()) {
FrameBuffer *outputBuffer = item.second;
request = outputBuffer->request();
pipe->completeBuffer(request, outputBuffer);
}
- converterQueue_.pop();
+ conversionQueue_.pop();
if (request)
pipe->completeRequest(request);
@@ -720,9 +836,9 @@ void SimpleCameraData::bufferReady(FrameBuffer *buffer)
*/
Request *request = buffer->request();
- if (useConverter_ && !converterQueue_.empty()) {
+ if (useConversion_ && !conversionQueue_.empty()) {
const std::map<unsigned int, FrameBuffer *> &outputs =
- converterQueue_.front();
+ conversionQueue_.front();
if (!outputs.empty()) {
FrameBuffer *outputBuffer = outputs.begin()->second;
if (outputBuffer)
@@ -735,18 +851,22 @@ void SimpleCameraData::bufferReady(FrameBuffer *buffer)
buffer->metadata().timestamp);
/*
- * Queue the captured and the request buffer to the converter if format
- * conversion is needed. If there's no queued request, just requeue the
- * captured buffer for capture.
+ * Queue the captured and the request buffer to the converter or Software
+ * ISP if format conversion is needed. If there's no queued request, just
+ * requeue the captured buffer for capture.
*/
- if (useConverter_) {
- if (converterQueue_.empty()) {
+ if (useConversion_) {
+ if (conversionQueue_.empty()) {
video_->queueBuffer(buffer);
return;
}
- converter_->queueBuffers(buffer, converterQueue_.front());
- converterQueue_.pop();
+ if (converter_)
+ converter_->queueBuffers(buffer, conversionQueue_.front());
+ else
+ swIsp_->queueBuffers(buffer, conversionQueue_.front());
+
+ conversionQueue_.pop();
return;
}
@@ -755,13 +875,13 @@ void SimpleCameraData::bufferReady(FrameBuffer *buffer)
pipe->completeRequest(request);
}
-void SimpleCameraData::converterInputDone(FrameBuffer *buffer)
+void SimpleCameraData::conversionInputDone(FrameBuffer *buffer)
{
/* Queue the input buffer back for capture. */
video_->queueBuffer(buffer);
}
-void SimpleCameraData::converterOutputDone(FrameBuffer *buffer)
+void SimpleCameraData::conversionOutputDone(FrameBuffer *buffer)
{
SimplePipelineHandler *pipe = SimpleCameraData::pipe();
@@ -771,6 +891,56 @@ void SimpleCameraData::converterOutputDone(FrameBuffer *buffer)
pipe->completeRequest(request);
}
+void SimpleCameraData::ispStatsReady()
+{
+ /* \todo Use the DelayedControls class */
+ swIsp_->processStats(sensor_->getControls({ V4L2_CID_ANALOGUE_GAIN,
+ V4L2_CID_EXPOSURE }));
+}
+
+void SimpleCameraData::setSensorControls(const ControlList &sensorControls)
+{
+ ControlList ctrls(sensorControls);
+ sensor_->setControls(&ctrls);
+}
+
+/* Retrieve all source pads connected to a sink pad through active routes. */
+std::vector<const MediaPad *> SimpleCameraData::routedSourcePads(MediaPad *sink)
+{
+ MediaEntity *entity = sink->entity();
+ std::unique_ptr<V4L2Subdevice> subdev =
+ std::make_unique<V4L2Subdevice>(entity);
+
+ int ret = subdev->open();
+ if (ret < 0)
+ return {};
+
+ V4L2Subdevice::Routing routing = {};
+ ret = subdev->getRouting(&routing, V4L2Subdevice::ActiveFormat);
+ if (ret < 0)
+ return {};
+
+ std::vector<const MediaPad *> pads;
+
+ for (const V4L2Subdevice::Route &route : routing) {
+ if (sink->index() != route.sink.pad ||
+ !(route.flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE))
+ continue;
+
+ const MediaPad *pad = entity->getPadByIndex(route.source.pad);
+ if (!pad) {
+ LOG(SimplePipeline, Warning)
+ << "Entity " << entity->name()
+ << " has invalid route source pad "
+ << route.source.pad;
+ }
+
+ pads.push_back(pad);
+ }
+
+ return pads;
+}
+
/* -----------------------------------------------------------------------------
* Camera Configuration
*/
@@ -782,17 +952,45 @@ SimpleCameraConfiguration::SimpleCameraConfiguration(Camera *camera,
{
}
+namespace {
+
+static Size adjustSize(const Size &requestedSize, const SizeRange &supportedSizes)
+{
+ ASSERT(supportedSizes.min <= supportedSizes.max);
+
+ if (supportedSizes.min == supportedSizes.max)
+ return supportedSizes.max;
+
+ unsigned int hStep = supportedSizes.hStep;
+ unsigned int vStep = supportedSizes.vStep;
+
+ if (hStep == 0)
+ hStep = supportedSizes.max.width - supportedSizes.min.width;
+ if (vStep == 0)
+ vStep = supportedSizes.max.height - supportedSizes.min.height;
+
+ Size adjusted = requestedSize.boundedTo(supportedSizes.max)
+ .expandedTo(supportedSizes.min);
+
+ return adjusted.shrunkBy(supportedSizes.min)
+ .alignedDownTo(hStep, vStep)
+ .grownBy(supportedSizes.min);
+}
+
+} /* namespace */
+
CameraConfiguration::Status SimpleCameraConfiguration::validate()
{
+ const CameraSensor *sensor = data_->sensor_.get();
Status status = Valid;
if (config_.empty())
return Invalid;
- if (transform != Transform::Identity) {
- transform = Transform::Identity;
+ Orientation requestedOrientation = orientation;
+ combinedTransform_ = sensor->computeTransform(&orientation);
+ if (orientation != requestedOrientation)
status = Adjusted;
- }
/* Cap the number of entries to the available streams. */
if (config_.size() > data_->streams_.size()) {
@@ -897,10 +1095,19 @@ CameraConfiguration::Status SimpleCameraConfiguration::validate()
}
if (!pipeConfig_->outputSizes.contains(cfg.size)) {
+ Size adjustedSize = pipeConfig_->captureSize;
+ /*
+ * The converter (when present) may not be able to output
+ * a size identical to its input size. The capture size is thus
+ * not guaranteed to be a valid output size. In such cases, use
+ * the smaller valid output size closest to the requested.
+ */
+ if (!pipeConfig_->outputSizes.contains(adjustedSize))
+ adjustedSize = adjustSize(cfg.size, pipeConfig_->outputSizes);
LOG(SimplePipeline, Debug)
<< "Adjusting size from " << cfg.size
- << " to " << pipeConfig_->captureSize;
- cfg.size = pipeConfig_->captureSize;
+ << " to " << adjustedSize;
+ cfg.size = adjustedSize;
status = Adjusted;
}
@@ -912,13 +1119,16 @@ CameraConfiguration::Status SimpleCameraConfiguration::validate()
/* Set the stride, frameSize and bufferCount. */
if (needConversion_) {
std::tie(cfg.stride, cfg.frameSize) =
- data_->converter_->strideAndFrameSize(cfg.pixelFormat,
- cfg.size);
+ data_->converter_
+ ? data_->converter_->strideAndFrameSize(cfg.pixelFormat,
+ cfg.size)
+ : data_->swIsp_->strideAndFrameSize(cfg.pixelFormat,
+ cfg.size);
if (cfg.stride == 0)
return Invalid;
} else {
V4L2DeviceFormat format;
- format.fourcc = V4L2PixelFormat::fromPixelFormat(cfg.pixelFormat);
+ format.fourcc = data_->video_->toV4L2PixelFormat(cfg.pixelFormat);
format.size = cfg.size;
int ret = data_->video_->tryFormat(&format);
@@ -944,12 +1154,12 @@ SimplePipelineHandler::SimplePipelineHandler(CameraManager *manager)
{
}
-CameraConfiguration *SimplePipelineHandler::generateConfiguration(Camera *camera,
- const StreamRoles &roles)
+std::unique_ptr<CameraConfiguration>
+SimplePipelineHandler::generateConfiguration(Camera *camera, Span<const StreamRole> roles)
{
SimpleCameraData *data = cameraData(camera);
- CameraConfiguration *config =
- new SimpleCameraConfiguration(camera, data);
+ std::unique_ptr<CameraConfiguration> config =
+ std::make_unique<SimpleCameraConfiguration>(camera, data);
if (roles.empty())
return config;
@@ -1020,15 +1230,16 @@ int SimplePipelineHandler::configure(Camera *camera, CameraConfiguration *c)
const SimpleCameraData::Configuration *pipeConfig = config->pipeConfig();
V4L2SubdeviceFormat format{};
- format.mbus_code = pipeConfig->code;
+ format.code = pipeConfig->code;
format.size = pipeConfig->sensorSize;
- ret = data->setupFormats(&format, V4L2Subdevice::ActiveFormat);
+ ret = data->setupFormats(&format, V4L2Subdevice::ActiveFormat,
+ config->combinedTransform());
if (ret < 0)
return ret;
/* Configure the video node. */
- V4L2PixelFormat videoFormat = V4L2PixelFormat::fromPixelFormat(pipeConfig->captureFormat);
+ V4L2PixelFormat videoFormat = video->toV4L2PixelFormat(pipeConfig->captureFormat);
V4L2DeviceFormat captureFormat;
captureFormat.fourcc = videoFormat;
@@ -1055,14 +1266,14 @@ int SimplePipelineHandler::configure(Camera *camera, CameraConfiguration *c)
/* Configure the converter if needed. */
std::vector<std::reference_wrapper<StreamConfiguration>> outputCfgs;
- data->useConverter_ = config->needConversion();
+ data->useConversion_ = config->needConversion();
for (unsigned int i = 0; i < config->size(); ++i) {
StreamConfiguration &cfg = config->at(i);
cfg.setStream(&data->streams_[i]);
- if (data->useConverter_)
+ if (data->useConversion_)
outputCfgs.push_back(cfg);
}
@@ -1075,7 +1286,10 @@ int SimplePipelineHandler::configure(Camera *camera, CameraConfiguration *c)
inputCfg.stride = captureFormat.planes[0].bpl;
inputCfg.bufferCount = kNumInternalBuffers;
- return data->converter_->configure(inputCfg, outputCfgs);
+ return data->converter_
+ ? data->converter_->configure(inputCfg, outputCfgs)
+ : data->swIsp_->configure(inputCfg, outputCfgs,
+ data->sensor_->controls());
}
int SimplePipelineHandler::exportFrameBuffers(Camera *camera, Stream *stream,
@@ -1088,9 +1302,12 @@ int SimplePipelineHandler::exportFrameBuffers(Camera *camera, Stream *stream,
* Export buffers on the converter or capture video node, depending on
* whether the converter is used or not.
*/
- if (data->useConverter_)
- return data->converter_->exportBuffers(data->streamIndex(stream),
- count, buffers);
+ if (data->useConversion_)
+ return data->converter_
+ ? data->converter_->exportBuffers(data->streamIndex(stream),
+ count, buffers)
+ : data->swIsp_->exportBuffers(data->streamIndex(stream),
+ count, buffers);
else
return data->video_->exportBuffers(count, buffers);
}
@@ -1109,13 +1326,13 @@ int SimplePipelineHandler::start(Camera *camera, [[maybe_unused]] const ControlL
return -EBUSY;
}
- if (data->useConverter_) {
+ if (data->useConversion_) {
/*
* When using the converter allocate a fixed number of internal
* buffers.
*/
ret = video->allocateBuffers(kNumInternalBuffers,
- &data->converterBuffers_);
+ &data->conversionBuffers_);
} else {
/* Otherwise, prepare for using buffers from the only stream. */
Stream *stream = &data->streams_[0];
@@ -1134,15 +1351,21 @@ int SimplePipelineHandler::start(Camera *camera, [[maybe_unused]] const ControlL
return ret;
}
- if (data->useConverter_) {
- ret = data->converter_->start();
+ if (data->useConversion_) {
+ if (data->converter_)
+ ret = data->converter_->start();
+ else if (data->swIsp_)
+ ret = data->swIsp_->start();
+ else
+ ret = 0;
+
if (ret < 0) {
stop(camera);
return ret;
}
/* Queue all internal buffers for capture. */
- for (std::unique_ptr<FrameBuffer> &buffer : data->converterBuffers_)
+ for (std::unique_ptr<FrameBuffer> &buffer : data->conversionBuffers_)
video->queueBuffer(buffer.get());
}
@@ -1154,15 +1377,19 @@ void SimplePipelineHandler::stopDevice(Camera *camera)
SimpleCameraData *data = cameraData(camera);
V4L2VideoDevice *video = data->video_;
- if (data->useConverter_)
- data->converter_->stop();
+ if (data->useConversion_) {
+ if (data->converter_)
+ data->converter_->stop();
+ else if (data->swIsp_)
+ data->swIsp_->stop();
+ }
video->streamOff();
video->releaseBuffers();
video->bufferReady.disconnect(data, &SimpleCameraData::bufferReady);
- data->converterBuffers_.clear();
+ data->conversionBuffers_.clear();
releasePipeline(data);
}
@@ -1180,7 +1407,7 @@ int SimplePipelineHandler::queueRequestDevice(Camera *camera, Request *request)
* queue, it will be handed to the converter in the capture
* completion handler.
*/
- if (data->useConverter_) {
+ if (data->useConversion_) {
buffers.emplace(data->streamIndex(stream), buffer);
} else {
ret = data->video_->queueBuffer(buffer);
@@ -1189,8 +1416,8 @@ int SimplePipelineHandler::queueRequestDevice(Camera *camera, Request *request)
}
}
- if (data->useConverter_)
- data->converterQueue_.push(std::move(buffers));
+ if (data->useConversion_)
+ data->conversionQueue_.push(std::move(buffers));
return 0;
}
@@ -1260,6 +1487,37 @@ std::vector<MediaEntity *> SimplePipelineHandler::locateSensors()
return sensors;
}
+int SimplePipelineHandler::resetRoutingTable(V4L2Subdevice *subdev)
+{
+ /* Reset the media entity routing table to its default state. */
+ V4L2Subdevice::Routing routing = {};
+
+ int ret = subdev->getRouting(&routing, V4L2Subdevice::TryFormat);
+ if (ret)
+ return ret;
+
+ ret = subdev->setRouting(&routing, V4L2Subdevice::ActiveFormat);
+ if (ret)
+ return ret;
+
+ /*
+ * If the routing table is empty we won't be able to meaningfully use
+ * the subdev.
+ */
+ if (routing.empty()) {
+ LOG(SimplePipeline, Error)
+ << "Default routing table of " << subdev->deviceNode()
+ << " is empty";
+ return -EINVAL;
+ }
+
+ LOG(SimplePipeline, Debug)
+ << "Routing table of " << subdev->deviceNode()
+ << " reset to " << routing;
+
+ return 0;
+}
+
bool SimplePipelineHandler::match(DeviceEnumerator *enumerator)
{
const SimplePipelineInfo *info = nullptr;
@@ -1286,6 +1544,8 @@ bool SimplePipelineHandler::match(DeviceEnumerator *enumerator)
}
}
+ swIspEnabled_ = info->swIspEnabled;
+
/* Locate the sensors. */
std::vector<MediaEntity *> sensors = locateSensors();
if (sensors.empty()) {
@@ -1352,6 +1612,23 @@ bool SimplePipelineHandler::match(DeviceEnumerator *enumerator)
<< ": " << strerror(-ret);
return false;
}
+
+ if (subdev->caps().hasStreams()) {
+ /*
+ * Reset the routing table to its default state
+ * to make sure entities are enumerated according
+ * to the default routing configuration.
+ */
+ ret = resetRoutingTable(subdev.get());
+ if (ret) {
+ LOG(SimplePipeline, Error)
+ << "Failed to reset routes for "
+ << subdev->deviceNode() << ": "
+ << strerror(-ret);
+ return false;
+ }
+ }
+
break;
default:
@@ -1455,6 +1732,6 @@ void SimplePipelineHandler::releasePipeline(SimpleCameraData *data)
}
}
-REGISTER_PIPELINE_HANDLER(SimplePipelineHandler)
+REGISTER_PIPELINE_HANDLER(SimplePipelineHandler, "simple")
} /* namespace libcamera */
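
The adjustSize() helper introduced above clamps a requested size to the
converter's supported output range and aligns the remainder above the minimum
down to the advertised step. As a worked example, assuming a hypothetical
output range of 32x32 to 4096x4096 with a 16x16 step (illustrative values, not
taken from any specific converter), a request for 1001x751 would be adjusted
as follows:

    requested            1001 x 751
    clamped to range     1001 x 751   (already within [32..4096])
    shrunk by min         969 x 719
    aligned down to step  960 x 704   (multiples of 16)
    grown by min          992 x 736   -> value returned by adjustSize()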
diff --git a/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp b/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
index 53b2f23a..8a7409fc 100644
--- a/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
+++ b/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * uvcvideo.cpp - Pipeline handler for uvcvideo devices
+ * Pipeline handler for uvcvideo devices
*/
#include <algorithm>
@@ -46,8 +46,16 @@ public:
ControlInfoMap::Map *ctrls);
void bufferReady(FrameBuffer *buffer);
+ const std::string &id() const { return id_; }
+
std::unique_ptr<V4L2VideoDevice> video_;
Stream stream_;
+ std::map<PixelFormat, std::vector<SizeRange>> formats_;
+
+private:
+ bool generateId();
+
+ std::string id_;
};
class UVCCameraConfiguration : public CameraConfiguration
@@ -66,8 +74,8 @@ class PipelineHandlerUVC : public PipelineHandler
public:
PipelineHandlerUVC(CameraManager *manager);
- CameraConfiguration *generateConfiguration(Camera *camera,
- const StreamRoles &roles) override;
+ std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles) override;
int configure(Camera *camera, CameraConfiguration *config) override;
int exportFrameBuffers(Camera *camera, Stream *stream,
@@ -81,8 +89,6 @@ public:
bool match(DeviceEnumerator *enumerator) override;
private:
- std::string generateId(const UVCCameraData *data);
-
int processControl(ControlList *controls, unsigned int id,
const ControlValue &value);
int processControls(UVCCameraData *data, Request *request);
@@ -105,8 +111,8 @@ CameraConfiguration::Status UVCCameraConfiguration::validate()
if (config_.empty())
return Invalid;
- if (transform != Transform::Identity) {
- transform = Transform::Identity;
+ if (orientation != Orientation::Rotate0) {
+ orientation = Orientation::Rotate0;
status = Adjusted;
}
@@ -149,7 +155,7 @@ CameraConfiguration::Status UVCCameraConfiguration::validate()
cfg.bufferCount = 4;
V4L2DeviceFormat format;
- format.fourcc = V4L2PixelFormat::fromPixelFormat(cfg.pixelFormat);
+ format.fourcc = data_->video_->toV4L2PixelFormat(cfg.pixelFormat);
format.size = cfg.size;
int ret = data_->video_->tryFormat(&format);
@@ -159,6 +165,11 @@ CameraConfiguration::Status UVCCameraConfiguration::validate()
cfg.stride = format.planes[0].bpl;
cfg.frameSize = format.planes[0].size;
+ if (cfg.colorSpace != format.colorSpace) {
+ cfg.colorSpace = format.colorSpace;
+ status = Adjusted;
+ }
+
return status;
}
@@ -167,24 +178,18 @@ PipelineHandlerUVC::PipelineHandlerUVC(CameraManager *manager)
{
}
-CameraConfiguration *PipelineHandlerUVC::generateConfiguration(Camera *camera,
- const StreamRoles &roles)
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerUVC::generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles)
{
UVCCameraData *data = cameraData(camera);
- CameraConfiguration *config = new UVCCameraConfiguration(data);
+ std::unique_ptr<CameraConfiguration> config =
+ std::make_unique<UVCCameraConfiguration>(data);
if (roles.empty())
return config;
- V4L2VideoDevice::Formats v4l2Formats = data->video_->formats();
- std::map<PixelFormat, std::vector<SizeRange>> deviceFormats;
- for (const auto &format : v4l2Formats) {
- PixelFormat pixelFormat = format.first.toPixelFormat();
- if (pixelFormat.isValid())
- deviceFormats[pixelFormat] = format.second;
- }
-
- StreamFormats formats(deviceFormats);
+ StreamFormats formats(data->formats_);
StreamConfiguration cfg(formats);
cfg.pixelFormat = formats.pixelformats().front();
@@ -205,7 +210,7 @@ int PipelineHandlerUVC::configure(Camera *camera, CameraConfiguration *config)
int ret;
V4L2DeviceFormat format;
- format.fourcc = V4L2PixelFormat::fromPixelFormat(cfg.pixelFormat);
+ format.fourcc = data->video_->toV4L2PixelFormat(cfg.pixelFormat);
format.size = cfg.size;
ret = data->video_->setFormat(&format);
@@ -213,7 +218,7 @@ int PipelineHandlerUVC::configure(Camera *camera, CameraConfiguration *config)
return ret;
if (format.size != cfg.size ||
- format.fourcc != V4L2PixelFormat::fromPixelFormat(cfg.pixelFormat))
+ format.fourcc != data->video_->toV4L2PixelFormat(cfg.pixelFormat))
return -EINVAL;
cfg.setStream(&data->stream_);
@@ -340,12 +345,8 @@ int PipelineHandlerUVC::processControls(UVCCameraData *data, Request *request)
{
ControlList controls(data->video_->controls());
- for (auto it : request->controls()) {
- unsigned int id = it.first;
- ControlValue &value = it.second;
-
+ for (const auto &[id, value] : request->controls())
processControl(&controls, id, value);
- }
for (const auto &ctrl : controls)
LOG(UVC, Debug)
@@ -383,69 +384,6 @@ int PipelineHandlerUVC::queueRequestDevice(Camera *camera, Request *request)
return 0;
}
-std::string PipelineHandlerUVC::generateId(const UVCCameraData *data)
-{
- const std::string path = data->video_->devicePath();
-
- /* Create a controller ID from first device described in firmware. */
- std::string controllerId;
- std::string searchPath = path;
- while (true) {
- std::string::size_type pos = searchPath.rfind('/');
- if (pos <= 1) {
- LOG(UVC, Error) << "Can not find controller ID";
- return {};
- }
-
- searchPath = searchPath.substr(0, pos);
-
- controllerId = sysfs::firmwareNodePath(searchPath);
- if (!controllerId.empty())
- break;
- }
-
- /*
- * Create a USB ID from the device path which has the known format:
- *
- * path = bus, "-", ports, ":", config, ".", interface ;
- * bus = number ;
- * ports = port, [ ".", ports ] ;
- * port = number ;
- * config = number ;
- * interface = number ;
- *
- * Example: 3-2.4:1.0
- *
- * The bus is not guaranteed to be stable and needs to be stripped from
- * the USB ID. The final USB ID is built up of the ports, config and
- * interface properties.
- *
- * Example 2.4:1.0.
- */
- std::string usbId = utils::basename(path.c_str());
- usbId = usbId.substr(usbId.find('-') + 1);
-
- /* Creata a device ID from the USB devices vendor and product ID. */
- std::string deviceId;
- for (const char *name : { "idVendor", "idProduct" }) {
- std::ifstream file(path + "/../" + name);
-
- if (!file.is_open())
- return {};
-
- std::string value;
- std::getline(file, value);
- file.close();
-
- if (!deviceId.empty())
- deviceId += ":";
-
- deviceId += value;
- }
-
- return controllerId + "-" + usbId + "-" + deviceId;
-}
-
bool PipelineHandlerUVC::match(DeviceEnumerator *enumerator)
{
MediaDevice *media;
@@ -461,12 +399,7 @@ bool PipelineHandlerUVC::match(DeviceEnumerator *enumerator)
return false;
/* Create and register the camera. */
- std::string id = generateId(data.get());
- if (id.empty()) {
- LOG(UVC, Error) << "Failed to generate camera ID";
- return false;
- }
-
+ std::string id = data->id();
std::set<Stream *> streams{ &data->stream_ };
std::shared_ptr<Camera> camera =
Camera::create(std::move(data), id, streams);
@@ -501,6 +434,39 @@ int UVCCameraData::init(MediaDevice *media)
video_->bufferReady.connect(this, &UVCCameraData::bufferReady);
+ /* Generate the camera ID. */
+ if (!generateId()) {
+ LOG(UVC, Error) << "Failed to generate camera ID";
+ return -EINVAL;
+ }
+
+ /*
+ * Populate the map of supported formats, and infer the camera sensor
+ * resolution from the largest size it advertises.
+ */
+ Size resolution;
+ for (const auto &format : video_->formats()) {
+ PixelFormat pixelFormat = format.first.toPixelFormat();
+ if (!pixelFormat.isValid())
+ continue;
+
+ formats_[pixelFormat] = format.second;
+
+ const std::vector<SizeRange> &sizeRanges = format.second;
+ for (const SizeRange &sizeRange : sizeRanges) {
+ if (sizeRange.max > resolution)
+ resolution = sizeRange.max;
+ }
+ }
+
+ if (formats_.empty()) {
+ LOG(UVC, Error)
+ << "Camera " << id_ << " (" << media->model()
+ << ") doesn't expose any supported format";
+ return -EINVAL;
+ }
+
+ /* Populate the camera properties. */
properties_.set(properties::Model, utils::toAscii(media->model()));
/*
@@ -531,19 +497,6 @@ int UVCCameraData::init(MediaDevice *media)
properties_.set(properties::Location, location);
- /*
- * Get the current format in order to initialize the sensor array
- * properties.
- */
- Size resolution;
- for (const auto &it : video_->formats()) {
- const std::vector<SizeRange> &sizeRanges = it.second;
- for (const SizeRange &sizeRange : sizeRanges) {
- if (sizeRange.max > resolution)
- resolution = sizeRange.max;
- }
- }
-
properties_.set(properties::PixelArraySize, resolution);
properties_.set(properties::PixelArrayActiveAreas, { Rectangle(resolution) });
@@ -562,6 +515,70 @@ int UVCCameraData::init(MediaDevice *media)
return 0;
}
+bool UVCCameraData::generateId()
+{
+ const std::string path = video_->devicePath();
+
+ /* Create a controller ID from the first device described in firmware. */
+ std::string controllerId;
+ std::string searchPath = path;
+ while (true) {
+ std::string::size_type pos = searchPath.rfind('/');
+ if (pos <= 1) {
+ LOG(UVC, Error) << "Can not find controller ID";
+ return false;
+ }
+
+ searchPath = searchPath.substr(0, pos);
+
+ controllerId = sysfs::firmwareNodePath(searchPath);
+ if (!controllerId.empty())
+ break;
+ }
+
+ /*
+ * Create a USB ID from the device path which has the known format:
+ *
+ * path = bus, "-", ports, ":", config, ".", interface ;
+ * bus = number ;
+ * ports = port, [ ".", ports ] ;
+ * port = number ;
+ * config = number ;
+ * interface = number ;
+ *
+ * Example: 3-2.4:1.0
+ *
+ * The bus is not guaranteed to be stable and needs to be stripped from
+ * the USB ID. The final USB ID is built up of the ports, config and
+ * interface properties.
+ *
+ * Example 2.4:1.0.
+ */
+ std::string usbId = utils::basename(path.c_str());
+ usbId = usbId.substr(usbId.find('-') + 1);
+
+ /* Create a device ID from the USB device's vendor and product ID. */
+ std::string deviceId;
+ for (const char *name : { "idVendor", "idProduct" }) {
+ std::ifstream file(path + "/../" + name);
+
+ if (!file.is_open())
+ return false;
+
+ std::string value;
+ std::getline(file, value);
+ file.close();
+
+ if (!deviceId.empty())
+ deviceId += ":";
+
+ deviceId += value;
+ }
+
+ id_ = controllerId + "-" + usbId + "-" + deviceId;
+ return true;
+}
+
void UVCCameraData::addControl(uint32_t cid, const ControlInfo &v4l2Info,
ControlInfoMap::Map *ctrls)
{
@@ -692,6 +709,6 @@ void UVCCameraData::bufferReady(FrameBuffer *buffer)
pipe()->completeRequest(request);
}
-REGISTER_PIPELINE_HANDLER(PipelineHandlerUVC)
+REGISTER_PIPELINE_HANDLER(PipelineHandlerUVC, "uvcvideo")
} /* namespace libcamera */
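
UVCCameraData::generateId() above assembles the camera ID from three parts:
the firmware node path of the closest ancestor device, the USB port path with
the unstable bus number stripped, and the vendor/product IDs read from sysfs.
As a purely illustrative example, for a device path ending in "3-2.4:1.0"
with idVendor 046d and idProduct 0825 (hypothetical values), the pieces would
combine as:

    usbId    = "2.4:1.0"     (leading bus "3-" removed)
    deviceId = "046d:0825"   (idVendor ":" idProduct)
    id_      = controllerId + "-" + "2.4:1.0" + "-" + "046d:0825"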
diff --git a/src/libcamera/pipeline/vimc/vimc.cpp b/src/libcamera/pipeline/vimc/vimc.cpp
index 3379ac5c..c7650432 100644
--- a/src/libcamera/pipeline/vimc/vimc.cpp
+++ b/src/libcamera/pipeline/vimc/vimc.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * vimc.cpp - Pipeline handler for the vimc device
+ * Pipeline handler for the vimc device
*/
#include <algorithm>
@@ -54,7 +54,7 @@ public:
int init();
int allocateMockIPABuffers();
void bufferReady(FrameBuffer *buffer);
- void paramsBufferReady(unsigned int id);
+ void paramsBufferReady(unsigned int id, const Flags<ipa::vimc::TestFlag> flags);
MediaDevice *media_;
std::unique_ptr<CameraSensor> sensor_;
@@ -84,8 +84,8 @@ class PipelineHandlerVimc : public PipelineHandler
public:
PipelineHandlerVimc(CameraManager *manager);
- CameraConfiguration *generateConfiguration(Camera *camera,
- const StreamRoles &roles) override;
+ std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles) override;
int configure(Camera *camera, CameraConfiguration *config) override;
int exportFrameBuffers(Camera *camera, Stream *stream,
@@ -128,8 +128,8 @@ CameraConfiguration::Status VimcCameraConfiguration::validate()
if (config_.empty())
return Invalid;
- if (transform != Transform::Identity) {
- transform = Transform::Identity;
+ if (orientation != Orientation::Rotate0) {
+ orientation = Orientation::Rotate0;
status = Adjusted;
}
@@ -171,7 +171,7 @@ CameraConfiguration::Status VimcCameraConfiguration::validate()
cfg.bufferCount = 4;
V4L2DeviceFormat format;
- format.fourcc = V4L2PixelFormat::fromPixelFormat(cfg.pixelFormat);
+ format.fourcc = data_->video_->toV4L2PixelFormat(cfg.pixelFormat);
format.size = cfg.size;
int ret = data_->video_->tryFormat(&format);
@@ -189,11 +189,13 @@ PipelineHandlerVimc::PipelineHandlerVimc(CameraManager *manager)
{
}
-CameraConfiguration *PipelineHandlerVimc::generateConfiguration(Camera *camera,
- const StreamRoles &roles)
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerVimc::generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles)
{
VimcCameraData *data = cameraData(camera);
- CameraConfiguration *config = new VimcCameraConfiguration(data);
+ std::unique_ptr<CameraConfiguration> config =
+ std::make_unique<VimcCameraConfiguration>(data);
if (roles.empty())
return config;
@@ -242,7 +244,7 @@ int PipelineHandlerVimc::configure(Camera *camera, CameraConfiguration *config)
/* The scaler hardcodes a x3 scale-up ratio. */
V4L2SubdeviceFormat subformat = {};
- subformat.mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8;
+ subformat.code = MEDIA_BUS_FMT_SGRBG8_1X8;
subformat.size = { cfg.size.width / 3, cfg.size.height / 3 };
ret = data->sensor_->setFormat(&subformat);
@@ -253,7 +255,7 @@ int PipelineHandlerVimc::configure(Camera *camera, CameraConfiguration *config)
if (ret)
return ret;
- subformat.mbus_code = pixelformats.find(cfg.pixelFormat)->second;
+ subformat.code = pixelformats.find(cfg.pixelFormat)->second;
ret = data->debayer_->setFormat(1, &subformat);
if (ret)
return ret;
@@ -275,7 +277,7 @@ int PipelineHandlerVimc::configure(Camera *camera, CameraConfiguration *config)
return ret;
V4L2DeviceFormat format;
- format.fourcc = V4L2PixelFormat::fromPixelFormat(cfg.pixelFormat);
+ format.fourcc = data->video_->toV4L2PixelFormat(cfg.pixelFormat);
format.size = cfg.size;
ret = data->video_->setFormat(&format);
@@ -283,7 +285,7 @@ int PipelineHandlerVimc::configure(Camera *camera, CameraConfiguration *config)
return ret;
if (format.size != cfg.size ||
- format.fourcc != V4L2PixelFormat::fromPixelFormat(cfg.pixelFormat))
+ format.fourcc != data->video_->toV4L2PixelFormat(cfg.pixelFormat))
return -EINVAL;
/*
@@ -378,7 +380,7 @@ int PipelineHandlerVimc::processControls(VimcCameraData *data, Request *request)
{
ControlList controls(data->sensor_->controls());
- for (auto it : request->controls()) {
+ for (const auto &it : request->controls()) {
unsigned int id = it.first;
unsigned int offset;
uint32_t cid;
@@ -471,7 +473,15 @@ bool PipelineHandlerVimc::match(DeviceEnumerator *enumerator)
data->ipa_->paramsBufferReady.connect(data.get(), &VimcCameraData::paramsBufferReady);
std::string conf = data->ipa_->configurationFile("vimc.conf");
- data->ipa_->init(IPASettings{ conf, data->sensor_->model() });
+ Flags<ipa::vimc::TestFlag> inFlags = ipa::vimc::TestFlag::Flag2;
+ Flags<ipa::vimc::TestFlag> outFlags;
+ data->ipa_->init(IPASettings{ conf, data->sensor_->model() },
+ ipa::vimc::IPAOperationInit, inFlags, &outFlags);
+
+ LOG(VIMC, Debug)
+ << "Flag 1 was "
+ << (outFlags & ipa::vimc::TestFlag::Flag1 ? "" : "not ")
+ << "set";
/* Create and register the camera. */
std::set<Stream *> streams{ &data->stream_ };
@@ -598,7 +608,7 @@ int VimcCameraData::allocateMockIPABuffers()
constexpr unsigned int kBufCount = 2;
V4L2DeviceFormat format;
- format.fourcc = V4L2PixelFormat::fromPixelFormat(formats::BGR888);
+ format.fourcc = video_->toV4L2PixelFormat(formats::BGR888);
format.size = Size (160, 120);
int ret = video_->setFormat(&format);
@@ -608,10 +618,11 @@ int VimcCameraData::allocateMockIPABuffers()
return video_->exportBuffers(kBufCount, &mockIPABufs_);
}
-void VimcCameraData::paramsBufferReady([[maybe_unused]] unsigned int id)
+void VimcCameraData::paramsBufferReady([[maybe_unused]] unsigned int id,
+ [[maybe_unused]] const Flags<ipa::vimc::TestFlag> flags)
{
}
-REGISTER_PIPELINE_HANDLER(PipelineHandlerVimc)
+REGISTER_PIPELINE_HANDLER(PipelineHandlerVimc, "vimc")
} /* namespace libcamera */
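
Since the vimc scaler hardcodes a x3 scale-up ratio (see the configure()
change above, which sets the subdevice size to cfg.size / 3), the sensor and
debayer subdevices are programmed at one third of the requested capture size
in each dimension. For example, a 1920x1440 capture configuration would
result in a 640x480 sensor/debayer format, which the scaler then upscales
back to 1920x1440.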
diff --git a/src/libcamera/pipeline_handler.cpp b/src/libcamera/pipeline_handler.cpp
index 7ebd76ad..5ea2ca78 100644
--- a/src/libcamera/pipeline_handler.cpp
+++ b/src/libcamera/pipeline_handler.cpp
@@ -2,12 +2,13 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * pipeline_handler.cpp - Pipeline handler infrastructure
+ * Pipeline handler infrastructure
*/
#include "libcamera/internal/pipeline_handler.h"
#include <chrono>
+#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <libcamera/base/log.h>
@@ -15,10 +16,11 @@
#include <libcamera/base/utils.h>
#include <libcamera/camera.h>
-#include <libcamera/camera_manager.h>
#include <libcamera/framebuffer.h>
+#include <libcamera/property_ids.h>
#include "libcamera/internal/camera.h"
+#include "libcamera/internal/camera_manager.h"
#include "libcamera/internal/device_enumerator.h"
#include "libcamera/internal/framebuffer.h"
#include "libcamera/internal/media_device.h"
@@ -64,11 +66,10 @@ LOG_DEFINE_CATEGORY(Pipeline)
*
* In order to honour the std::enable_shared_from_this<> contract,
* PipelineHandler instances shall never be constructed manually, but always
- * through the PipelineHandlerFactory::create() function implemented by the
- * respective factories.
+ * through the PipelineHandlerFactoryBase::create() function.
*/
PipelineHandler::PipelineHandler(CameraManager *manager)
- : manager_(manager), lockOwner_(false)
+ : manager_(manager), useCount_(0)
{
}
@@ -143,58 +144,89 @@ MediaDevice *PipelineHandler::acquireMediaDevice(DeviceEnumerator *enumerator,
}
/**
- * \brief Lock all media devices acquired by the pipeline
+ * \brief Acquire exclusive access to the pipeline handler for the process
*
- * This function shall not be called from pipeline handler implementation, as
- * the Camera class handles locking directly.
+ * This function locks all the media devices used by the pipeline to ensure
+ * that no other process can access them concurrently.
+ *
+ * Access to a pipeline handler may be acquired recursively from within the
+ * same process. Every successful acquire() call shall be matched with a
+ * release() call. This allows concurrent access to the same pipeline handler
+ * from different cameras within the same process.
+ *
+ * Pipeline handlers shall not call this function directly as the Camera class
+ * handles access internally.
*
* \context This function is \threadsafe.
*
- * \return True if the devices could be locked, false otherwise
- * \sa unlock()
- * \sa MediaDevice::lock()
+ * \return True if the pipeline handler was acquired, false if another process
+ * has already acquired it
+ * \sa release()
*/
-bool PipelineHandler::lock()
+bool PipelineHandler::acquire()
{
MutexLocker locker(lock_);
- /* Do not allow nested locking in the same libcamera instance. */
- if (lockOwner_)
- return false;
+ if (useCount_) {
+ ++useCount_;
+ return true;
+ }
for (std::shared_ptr<MediaDevice> &media : mediaDevices_) {
if (!media->lock()) {
- unlock();
+ unlockMediaDevices();
return false;
}
}
- lockOwner_ = true;
-
+ ++useCount_;
return true;
}
/**
- * \brief Unlock all media devices acquired by the pipeline
+ * \brief Release exclusive access to the pipeline handler
+ * \param[in] camera The camera for which to release data
+ *
+ * This function releases access to the pipeline handler previously acquired by
+ * a call to acquire(). Every release() call shall match a previous successful
+ * acquire() call. Calling this function on a pipeline handler that hasn't been
+ * acquired results in undefined behaviour.
*
- * This function shall not be called from pipeline handler implementation, as
- * the Camera class handles locking directly.
+ * Pipeline handlers shall not call this function directly as the Camera class
+ * handles access internally.
*
* \context This function is \threadsafe.
*
- * \sa lock()
+ * \sa acquire()
*/
-void PipelineHandler::unlock()
+void PipelineHandler::release(Camera *camera)
{
MutexLocker locker(lock_);
- if (!lockOwner_)
- return;
+ ASSERT(useCount_);
+
+ unlockMediaDevices();
+ releaseDevice(camera);
+
+ --useCount_;
+}
+
+/**
+ * \brief Release resources associated with this camera
+ * \param[in] camera The camera for which to release resources
+ *
+ * Pipeline handlers may override this in order to perform cleanup operations
+ * when a camera is released, such as freeing memory.
+ */
+void PipelineHandler::releaseDevice([[maybe_unused]] Camera *camera)
+{
+}
+
+void PipelineHandler::unlockMediaDevices()
+{
for (std::shared_ptr<MediaDevice> &media : mediaDevices_)
media->unlock();
-
- lockOwner_ = false;
}
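Editorial note: the reference counting introduced above is easiest to see as a usage sketch. The lines below are illustrative only (they are not part of this patch) and show the pairing contract the Camera class is expected to follow; pipe_, camera0 and camera1 are placeholder names.

    /* Sketch: acquire()/release() pairing for two cameras of one pipeline. */
    if (!pipe_->acquire())
            return -EBUSY;          /* another process holds the media devices */

    /* A second camera in the same process may acquire recursively. */
    if (!pipe_->acquire())
            return -EBUSY;

    /* ... capture ... */

    pipe_->release(camera1);        /* every successful acquire() ...          */
    pipe_->release(camera0);        /* ... is matched by exactly one release() */
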
/**
@@ -217,8 +249,7 @@ void PipelineHandler::unlock()
* handler.
*
* \return A valid CameraConfiguration if the requested roles can be satisfied,
- * or a null pointer otherwise. The ownership of the returned configuration is
- * passed to the caller.
+ * or a null pointer otherwise.
*/
/**
@@ -312,6 +343,8 @@ void PipelineHandler::stop(Camera *camera)
/* Make sure no requests are pending. */
Camera::Private *data = camera->_d();
ASSERT(data->queuedRequests_.empty());
+
+ data->requestSequence_ = 0;
}
/**
@@ -504,6 +537,62 @@ void PipelineHandler::completeRequest(Request *request)
}
/**
+ * \brief Retrieve the absolute path to a platform configuration file
+ * \param[in] subdir The pipeline handler specific subdirectory name
+ * \param[in] name The configuration file name
+ *
+ * This function locates a named platform configuration file and returns
+ * its absolute path to the pipeline handler. It searches the following
+ * directories, in order:
+ *
+ * - If libcamera is not installed, the src/libcamera/pipeline/\<subdir\>/data/
+ * directory within the source tree; otherwise
+ * - The system data (share/libcamera/pipeline/\<subdir\>) directory.
+ *
+ * The system directories are not searched if libcamera is not installed.
+ *
+ * \return The full path to the pipeline handler configuration file, or an empty
+ * string if no configuration file can be found
+ */
+std::string PipelineHandler::configurationFile(const std::string &subdir,
+ const std::string &name) const
+{
+ std::string confPath;
+ struct stat statbuf;
+ int ret;
+
+ std::string root = utils::libcameraSourcePath();
+ if (!root.empty()) {
+ /*
+ * When libcamera is used before it is installed, load
+ * configuration files from the source directory. The
+ * configuration files are then located in the 'data'
+ * subdirectory of the corresponding pipeline handler.
+ */
+ std::string confDir = root + "src/libcamera/pipeline/";
+ confPath = confDir + subdir + "/data/" + name;
+
+ LOG(Pipeline, Info)
+ << "libcamera is not installed. Loading platform configuration file from '"
+ << confPath << "'";
+ } else {
+ /* Else look in the system locations. */
+ confPath = std::string(LIBCAMERA_DATA_DIR)
+ + "/pipeline/" + subdir + '/' + name;
+ }
+
+ ret = stat(confPath.c_str(), &statbuf);
+ if (ret == 0 && (statbuf.st_mode & S_IFMT) == S_IFREG)
+ return confPath;
+
+ LOG(Pipeline, Error)
+ << "Configuration file '" << confPath
+ << "' not found for pipeline handler '" << PipelineHandler::name() << "'";
+
+ return std::string();
+}
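Editorial note: a pipeline handler is expected to call this helper with its subdirectory and file name and to cope with an empty return value. A minimal sketch follows; the names "example" and "tuning.yaml" are hypothetical and not part of this patch.

    /* Sketch: look up a platform configuration file for this handler. */
    std::string path = configurationFile("example", "tuning.yaml");
    if (path.empty())
            return -ENOENT;   /* neither the source tree nor the system dir has it */

    /* On success, path is absolute and points to a regular file. */
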
+
+/**
* \brief Register a camera to the camera manager and pipeline handler
* \param[in] camera The camera to be added
*
@@ -524,7 +613,7 @@ void PipelineHandler::registerCamera(std::shared_ptr<Camera> camera)
* Walk the entity list and map the devnums of all capture video nodes
* to the camera.
*/
- std::vector<dev_t> devnums;
+ std::vector<int64_t> devnums;
for (const std::shared_ptr<MediaDevice> &media : mediaDevices_) {
for (const MediaEntity *entity : media->entities()) {
if (entity->pads().size() == 1 &&
@@ -536,7 +625,14 @@ void PipelineHandler::registerCamera(std::shared_ptr<Camera> camera)
}
}
- manager_->addCamera(std::move(camera), devnums);
+ /*
+ * Store the associated devices as a property of the camera to allow
+ * systems to identify which devices are managed by libcamera.
+ */
+ Camera::Private *data = camera->_d();
+ data->properties_.set(properties::SystemDevices, devnums);
+
+ manager_->_d()->addCamera(std::move(camera));
}
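Editorial note: the SystemDevices property set here is readable from applications. A short sketch of how a client might list the device numbers; the printing code is illustrative, not from this patch.

    #include <iostream>
    #include <sys/sysmacros.h>

    #include <libcamera/property_ids.h>

    /* Sketch: list the kernel devices backing a camera. */
    const auto &devices = camera->properties().get(properties::SystemDevices);
    if (devices) {
            for (int64_t dev : *devices)
                    std::cout << major(dev) << ":" << minor(dev) << std::endl;
    }
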
/**
@@ -553,7 +649,7 @@ void PipelineHandler::registerCamera(std::shared_ptr<Camera> camera)
*/
void PipelineHandler::hotplugMediaDevice(MediaDevice *media)
{
- media->disconnected.connect(this, [=]() { mediaDeviceDisconnected(media); });
+ media->disconnected.connect(this, [this, media] { mediaDeviceDisconnected(media); });
}
/**
@@ -597,13 +693,13 @@ void PipelineHandler::disconnect()
*/
std::vector<std::weak_ptr<Camera>> cameras{ std::move(cameras_) };
- for (std::weak_ptr<Camera> ptr : cameras) {
+ for (const std::weak_ptr<Camera> &ptr : cameras) {
std::shared_ptr<Camera> camera = ptr.lock();
if (!camera)
continue;
camera->disconnect();
- manager_->removeCamera(camera);
+ manager_->_d()->removeCamera(camera);
}
}
@@ -624,27 +720,25 @@ void PipelineHandler::disconnect()
*/
/**
- * \class PipelineHandlerFactory
- * \brief Registration of PipelineHandler classes and creation of instances
+ * \class PipelineHandlerFactoryBase
+ * \brief Base class for pipeline handler factories
*
- * To facilitate discovery and instantiation of PipelineHandler classes, the
- * PipelineHandlerFactory class maintains a registry of pipeline handler
- * classes. Each PipelineHandler subclass shall register itself using the
- * REGISTER_PIPELINE_HANDLER() macro, which will create a corresponding
- * instance of a PipelineHandlerFactory subclass and register it with the
- * static list of factories.
+ * The PipelineHandlerFactoryBase class is the base of all specializations of
+ * the PipelineHandlerFactory class template. It implements the factory
+ * registration, maintains a registry of factories, and provides access to the
+ * registered factories.
*/
/**
- * \brief Construct a pipeline handler factory
+ * \brief Construct a pipeline handler factory base
* \param[in] name Name of the pipeline handler class
*
- * Creating an instance of the factory registers is with the global list of
+ * Creating an instance of the factory base registers it with the global list of
* factories, accessible through the factories() function.
*
* The factory \a name is used for debug purpose and shall be unique.
*/
-PipelineHandlerFactory::PipelineHandlerFactory(const char *name)
+PipelineHandlerFactoryBase::PipelineHandlerFactoryBase(const char *name)
: name_(name)
{
registerType(this);
@@ -657,15 +751,15 @@ PipelineHandlerFactory::PipelineHandlerFactory(const char *name)
* \return A shared pointer to a new instance of the PipelineHandler subclass
* corresponding to the factory
*/
-std::shared_ptr<PipelineHandler> PipelineHandlerFactory::create(CameraManager *manager)
+std::shared_ptr<PipelineHandler> PipelineHandlerFactoryBase::create(CameraManager *manager) const
{
- PipelineHandler *handler = createInstance(manager);
+ std::unique_ptr<PipelineHandler> handler = createInstance(manager);
handler->name_ = name_.c_str();
- return std::shared_ptr<PipelineHandler>(handler);
+ return std::shared_ptr<PipelineHandler>(std::move(handler));
}
/**
- * \fn PipelineHandlerFactory::name()
+ * \fn PipelineHandlerFactoryBase::name()
* \brief Retrieve the factory name
* \return The factory name
*/
@@ -677,9 +771,10 @@ std::shared_ptr<PipelineHandler> PipelineHandlerFactory::create(CameraManager *m
* The caller is responsible to guarantee the uniqueness of the pipeline handler
* name.
*/
-void PipelineHandlerFactory::registerType(PipelineHandlerFactory *factory)
+void PipelineHandlerFactoryBase::registerType(PipelineHandlerFactoryBase *factory)
{
- std::vector<PipelineHandlerFactory *> &factories = PipelineHandlerFactory::factories();
+ std::vector<PipelineHandlerFactoryBase *> &factories =
+ PipelineHandlerFactoryBase::factories();
factories.push_back(factory);
}
@@ -688,34 +783,77 @@ void PipelineHandlerFactory::registerType(PipelineHandlerFactory *factory)
* \brief Retrieve the list of all pipeline handler factories
* \return the list of pipeline handler factories
*/
-std::vector<PipelineHandlerFactory *> &PipelineHandlerFactory::factories()
+std::vector<PipelineHandlerFactoryBase *> &PipelineHandlerFactoryBase::factories()
{
/*
* The static factories map is defined inside the function to ensure
* it gets initialized on first use, without any dependency on
* link order.
*/
- static std::vector<PipelineHandlerFactory *> factories;
+ static std::vector<PipelineHandlerFactoryBase *> factories;
return factories;
}
/**
- * \fn PipelineHandlerFactory::createInstance()
- * \brief Create an instance of the PipelineHandler corresponding to the factory
- * \param[in] manager The camera manager
+ * \brief Return the factory for the pipeline handler with name \a name
+ * \param[in] name The pipeline handler name
+ * \return The factory of the pipeline with name \a name, or nullptr if not found
+ */
+const PipelineHandlerFactoryBase *PipelineHandlerFactoryBase::getFactoryByName(const std::string &name)
+{
+ const std::vector<PipelineHandlerFactoryBase *> &factories =
+ PipelineHandlerFactoryBase::factories();
+
+ auto iter = std::find_if(factories.begin(),
+ factories.end(),
+ [&name](const PipelineHandlerFactoryBase *f) {
+ return f->name() == name;
+ });
+
+ if (iter != factories.end())
+ return *iter;
+
+ return nullptr;
+}
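Editorial note: a brief sketch of the lookup path this enables, for instance when honouring a pipeline selection by name; "vimc" is used only as an example name.

    /* Sketch: instantiate a pipeline handler selected by name. */
    const PipelineHandlerFactoryBase *factory =
            PipelineHandlerFactoryBase::getFactoryByName("vimc");
    if (!factory)
            return;

    std::shared_ptr<PipelineHandler> pipe = factory->create(manager);
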
+
+/**
+ * \class PipelineHandlerFactory
+ * \brief Registration of PipelineHandler classes and creation of instances
+ * \tparam _PipelineHandler The pipeline handler class type for this factory
*
- * This virtual function is implemented by the REGISTER_PIPELINE_HANDLER()
- * macro. It creates a pipeline handler instance associated with the camera
- * \a manager.
+ * To facilitate discovery and instantiation of PipelineHandler classes, the
+ * PipelineHandlerFactory class implements auto-registration of pipeline
+ * handlers. Each PipelineHandler subclass shall register itself using the
+ * REGISTER_PIPELINE_HANDLER() macro, which will create a corresponding
+ * instance of a PipelineHandlerFactory and register it with the static list of
+ * factories.
+ */
+
+/**
+ * \fn PipelineHandlerFactory::PipelineHandlerFactory(const char *name)
+ * \brief Construct a pipeline handler factory
+ * \param[in] name Name of the pipeline handler class
*
- * \return a pointer to a newly constructed instance of the PipelineHandler
- * subclass corresponding to the factory
+ * Creating an instance of the factory registers it with the global list of
+ * factories, accessible through the factories() function.
+ *
+ * The factory \a name is used for debug purpose and shall be unique.
+ */
+
+/**
+ * \fn PipelineHandlerFactory::createInstance() const
+ * \brief Create an instance of the PipelineHandler corresponding to the factory
+ * \param[in] manager The camera manager
+ * \return A unique pointer to a newly constructed instance of the
+ * PipelineHandler subclass corresponding to the factory
*/
/**
* \def REGISTER_PIPELINE_HANDLER
* \brief Register a pipeline handler with the pipeline handler factory
* \param[in] handler Class name of PipelineHandler derived class to register
+ * \param[in] name Name assigned to the pipeline handler, matching the pipeline
+ * subdirectory name in the source tree.
*
* Register a PipelineHandler subclass with the factory and make it available to
* try and match devices.
diff --git a/src/libcamera/pixel_format.cpp b/src/libcamera/pixel_format.cpp
index 80c22072..314179a8 100644
--- a/src/libcamera/pixel_format.cpp
+++ b/src/libcamera/pixel_format.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * pixel_format.cpp - libcamera Pixel Format
+ * libcamera Pixel Format
*/
#include <libcamera/formats.h>
diff --git a/src/libcamera/process.cpp b/src/libcamera/process.cpp
index 0e6b4e1d..86d27b2d 100644
--- a/src/libcamera/process.cpp
+++ b/src/libcamera/process.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * process.cpp - Process object
+ * Process object
*/
#include "libcamera/internal/process.h"
@@ -263,7 +263,9 @@ int Process::start(const std::string &path,
closeAllFdsExcept(fds);
- unsetenv("LIBCAMERA_LOG_FILE");
+ const char *file = utils::secure_getenv("LIBCAMERA_LOG_FILE");
+ if (file && strcmp(file, "syslog"))
+ unsetenv("LIBCAMERA_LOG_FILE");
const char **argv = new const char *[args.size() + 2];
unsigned int len = args.size();
diff --git a/src/libcamera/property_ids.cpp.in b/src/libcamera/property_ids.cpp.in
index f917e334..8b274c38 100644
--- a/src/libcamera/property_ids.cpp.in
+++ b/src/libcamera/property_ids.cpp.in
@@ -23,14 +23,7 @@ namespace properties {
${controls_doc}
-/**
- * \brief Namespace for libcamera draft properties
- */
-namespace draft {
-
-${draft_controls_doc}
-
-} /* namespace draft */
+${vendor_controls_doc}
#ifndef __DOXYGEN__
/*
@@ -39,11 +32,8 @@ ${draft_controls_doc}
*/
${controls_def}
-namespace draft {
-
-${draft_controls_def}
+${vendor_controls_def}
-} /* namespace draft */
#endif
/**
diff --git a/src/libcamera/property_ids.yaml b/src/libcamera/property_ids_core.yaml
index 11b7ebdc..834454a4 100644
--- a/src/libcamera/property_ids.yaml
+++ b/src/libcamera/property_ids_core.yaml
@@ -2,8 +2,9 @@
#
# Copyright (C) 2019, Google Inc.
#
-%YAML 1.2
+%YAML 1.1
---
+vendor: libcamera
controls:
- Location:
type: int32_t
@@ -29,10 +30,10 @@ controls:
- Rotation:
type: int32_t
description: |
- The camera rotation is expressed as the angular difference in degrees
- between two reference systems, one relative to the camera module, and
- one defined on the external world scene to be captured when projected
- on the image sensor pixel array.
+ The camera physical mounting rotation. It is expressed as the angular
+ difference in degrees between two reference systems, one relative to the
+ camera module, and one defined on the external world scene to be
+ captured when projected on the image sensor pixel array.
A camera sensor has a 2-dimensional reference system 'Rc' defined by
its pixel array read-out order. The origin is set to the first pixel
@@ -690,37 +691,14 @@ controls:
that is twice that of the full resolution mode. This value will be valid
after the configure method has returned successfully.
- # ----------------------------------------------------------------------------
- # Draft properties section
-
- - ColorFilterArrangement:
- type: int32_t
- draft: true
+ - SystemDevices:
+ type: int64_t
+ size: [n]
description: |
- The arrangement of color filters on sensor; represents the colors in the
- top-left 2x2 section of the sensor, in reading order. Currently
- identical to ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT.
- enum:
- - name: RGGB
- value: 0
- description: RGGB Bayer pattern
- - name: GRBG
- value: 1
- description: GRBG Bayer pattern
- - name: GBRG
- value: 2
- description: GBRG Bayer pattern
- - name: BGGR
- value: 3
- description: BGGR Bayer pattern
- - name: RGB
- value: 4
- description: |
- Sensor is not Bayer; output has 3 16-bit values for each pixel,
- instead of just 1 16-bit value per pixel.
- - name: MONO
- value: 5
- description: |
- Sensor is not Bayer; output consists of a single colour channel.
+ A list of integer values of type dev_t denoting the major and minor
+ device numbers of the underlying devices used in the operation of this
+ camera.
+
+ Different cameras may report identical devices.
...
diff --git a/src/libcamera/property_ids_draft.yaml b/src/libcamera/property_ids_draft.yaml
new file mode 100644
index 00000000..62f0e242
--- /dev/null
+++ b/src/libcamera/property_ids_draft.yaml
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: LGPL-2.1-or-later
+#
+# Copyright (C) 2019, Google Inc.
+#
+%YAML 1.1
+---
+vendor: draft
+controls:
+ - ColorFilterArrangement:
+ type: int32_t
+ vendor: draft
+ description: |
+ The arrangement of color filters on sensor; represents the colors in the
+ top-left 2x2 section of the sensor, in reading order. Currently
+ identical to ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT.
+ enum:
+ - name: RGGB
+ value: 0
+ description: RGGB Bayer pattern
+ - name: GRBG
+ value: 1
+ description: GRBG Bayer pattern
+ - name: GBRG
+ value: 2
+ description: GBRG Bayer pattern
+ - name: BGGR
+ value: 3
+ description: BGGR Bayer pattern
+ - name: RGB
+ value: 4
+ description: |
+ Sensor is not Bayer; output has 3 16-bit values for each pixel,
+ instead of just 1 16-bit value per pixel.
+ - name: MONO
+ value: 5
+ description: |
+ Sensor is not Bayer; output consists of a single colour channel.
+
+...
diff --git a/src/libcamera/proxy/worker/meson.build b/src/libcamera/proxy/worker/meson.build
index 70c8760a..aa4d9cd7 100644
--- a/src/libcamera/proxy/worker/meson.build
+++ b/src/libcamera/proxy/worker/meson.build
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: CC0-1.0
-proxy_install_dir = get_option('libexecdir') / 'libcamera'
+proxy_install_dir = libcamera_libexecdir
# generate {pipeline}_ipa_proxy_worker.cpp
foreach mojom : ipa_mojoms
diff --git a/src/libcamera/pub_key.cpp b/src/libcamera/pub_key.cpp
index 9bb08fda..f1d73a5c 100644
--- a/src/libcamera/pub_key.cpp
+++ b/src/libcamera/pub_key.cpp
@@ -2,12 +2,17 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * pub_key.cpp - Public key signature verification
+ * Public key signature verification
*/
#include "libcamera/internal/pub_key.h"
-#if HAVE_GNUTLS
+#if HAVE_CRYPTO
+#include <openssl/evp.h>
+#include <openssl/rsa.h>
+#include <openssl/sha.h>
+#include <openssl/x509.h>
+#elif HAVE_GNUTLS
#include <gnutls/abstract.h>
#endif
@@ -33,7 +38,14 @@ namespace libcamera {
PubKey::PubKey([[maybe_unused]] Span<const uint8_t> key)
: valid_(false)
{
-#if HAVE_GNUTLS
+#if HAVE_CRYPTO
+ const uint8_t *data = key.data();
+ pubkey_ = d2i_PUBKEY(nullptr, &data, key.size());
+ if (!pubkey_)
+ return;
+
+ valid_ = true;
+#elif HAVE_GNUTLS
int ret = gnutls_pubkey_init(&pubkey_);
if (ret < 0)
return;
@@ -52,7 +64,9 @@ PubKey::PubKey([[maybe_unused]] Span<const uint8_t> key)
PubKey::~PubKey()
{
-#if HAVE_GNUTLS
+#if HAVE_CRYPTO
+ EVP_PKEY_free(pubkey_);
+#elif HAVE_GNUTLS
gnutls_pubkey_deinit(pubkey_);
#endif
}
@@ -76,7 +90,35 @@ PubKey::~PubKey()
bool PubKey::verify([[maybe_unused]] Span<const uint8_t> data,
[[maybe_unused]] Span<const uint8_t> sig) const
{
-#if HAVE_GNUTLS
+ if (!valid_)
+ return false;
+
+#if HAVE_CRYPTO
+ /*
+ * Create and initialize a public key algorithm context for signature
+ * verification.
+ */
+ EVP_PKEY_CTX *ctx = EVP_PKEY_CTX_new(pubkey_, nullptr);
+ if (!ctx)
+ return false;
+
+ if (EVP_PKEY_verify_init(ctx) <= 0 ||
+ EVP_PKEY_CTX_set_rsa_padding(ctx, RSA_PKCS1_PADDING) <= 0 ||
+ EVP_PKEY_CTX_set_signature_md(ctx, EVP_sha256()) <= 0) {
+ EVP_PKEY_CTX_free(ctx);
+ return false;
+ }
+
+ /* Calculate the SHA256 digest of the data. */
+ uint8_t digest[SHA256_DIGEST_LENGTH];
+ SHA256(data.data(), data.size(), digest);
+
+ /* Decrypt the signature and verify it matches the digest. */
+ int ret = EVP_PKEY_verify(ctx, sig.data(), sig.size(), digest,
+ SHA256_DIGEST_LENGTH);
+ EVP_PKEY_CTX_free(ctx);
+ return ret == 1;
+#elif HAVE_GNUTLS
const gnutls_datum_t gnuTlsData{
const_cast<unsigned char *>(data.data()),
static_cast<unsigned int>(data.size())
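Editorial note: the public API of PubKey is unchanged by the OpenSSL backend; a caller still constructs it from a DER-encoded key and calls verify(). A minimal sketch, assuming the existing isValid() accessor and hypothetical key/data spans.

    /* Sketch: verify a signed blob against a DER-encoded RSA public key. */
    PubKey key(publicKeyDer);               /* Span<const uint8_t>, hypothetical */
    if (!key.isValid())
            return false;

    bool ok = key.verify(payload, signature);
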
diff --git a/src/libcamera/request.cpp b/src/libcamera/request.cpp
index 51d74b29..cfb451e9 100644
--- a/src/libcamera/request.cpp
+++ b/src/libcamera/request.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * request.cpp - Capture request handling
+ * Capture request handling
*/
#include "libcamera/internal/request.h"
@@ -158,9 +158,12 @@ void Request::Private::cancel()
}
/**
- * \copydoc Request::reuse()
+ * \brief Reset the request internal data to default values
+ *
+ * After calling this function, all request internal data will have default
+ * values as if the Request::Private instance had just been constructed.
*/
-void Request::Private::reuse()
+void Request::Private::reset()
{
sequence_ = 0;
cancelled_ = false;
@@ -349,7 +352,7 @@ Request::Request(Camera *camera, uint64_t cookie)
camera->_d()->validator());
/**
- * \todo: Add a validator for metadata controls.
+ * \todo Add a validator for metadata controls.
*/
metadata_ = new ControlList(controls::controls);
@@ -380,7 +383,7 @@ void Request::reuse(ReuseFlag flags)
{
LIBCAMERA_TRACEPOINT(request_reuse, this);
- _d()->reuse();
+ _d()->reset();
if (flags & ReuseBuffers) {
for (auto pair : bufferMap_) {
@@ -526,8 +529,8 @@ FrameBuffer *Request::findBuffer(const Stream *stream) const
*
* When requests are queued, they are given a sequential number to track the
* order in which requests are queued to a camera. This number counts all
- * requests given to a camera through its lifetime, and is not reset to zero
- * between camera stop/start sequences.
+ * requests given to a camera and is reset to zero between camera stop/start
+ * sequences.
*
* It can be used to support debugging and identifying the flow of requests
* through a pipeline, but does not guarantee to represent the sequence number
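Editorial note: since the sequence counter now restarts on every stop/start cycle, applications typically track their own state via reuse() and the request cookie. A small illustrative sketch; the control value is arbitrary.

    /* Sketch: recycle a completed request, keeping its buffers attached. */
    request->reuse(Request::ReuseBuffers);
    request->controls().set(controls::ExposureTime, 10000);
    camera->queueRequest(request);
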
diff --git a/src/libcamera/camera_sensor.cpp b/src/libcamera/sensor/camera_sensor.cpp
index d055c16a..c6d7f801 100644
--- a/src/libcamera/camera_sensor.cpp
+++ b/src/libcamera/sensor/camera_sensor.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_sensor.cpp - A camera sensor
+ * A camera sensor
*/
#include "libcamera/internal/camera_sensor.h"
@@ -15,6 +15,8 @@
#include <math.h>
#include <string.h>
+#include <libcamera/camera.h>
+#include <libcamera/orientation.h>
#include <libcamera/property_ids.h>
#include <libcamera/base/utils.h>
@@ -55,7 +57,8 @@ LOG_DEFINE_CATEGORY(CameraSensor)
*/
CameraSensor::CameraSensor(const MediaEntity *entity)
: entity_(entity), pad_(UINT_MAX), staticProps_(nullptr),
- bayerFormat_(nullptr), properties_(properties::properties)
+ bayerFormat_(nullptr), supportFlips_(false),
+ flipsAlterBayerOrder_(false), properties_(properties::properties)
{
}
@@ -152,7 +155,12 @@ int CameraSensor::init()
*/
if (entity_->device()->driver() == "vimc") {
initVimcDefaultProperties();
- return initProperties();
+
+ ret = initProperties();
+ if (ret)
+ return ret;
+
+ return discoverAncillaryDevices();
}
/* Get the color filter array pattern (only for RAW sensors). */
@@ -176,9 +184,48 @@ int CameraSensor::init()
if (ret)
return ret;
+ /*
+ * Set HBLANK to the minimum to start with a well-defined line length,
+ * allowing IPA modules that do not modify HBLANK to use the sensor
+ * minimum line length in their calculations.
+ */
+ const struct v4l2_query_ext_ctrl *hblankInfo = subdev_->controlInfo(V4L2_CID_HBLANK);
+ if (hblankInfo && !(hblankInfo->flags & V4L2_CTRL_FLAG_READ_ONLY)) {
+ ControlList ctrl(subdev_->controls());
+
+ ctrl.set(V4L2_CID_HBLANK, static_cast<int32_t>(hblankInfo->minimum));
+ ret = subdev_->setControls(&ctrl);
+ if (ret)
+ return ret;
+ }
+
return applyTestPatternMode(controls::draft::TestPatternModeEnum::TestPatternModeOff);
}
+int CameraSensor::generateId()
+{
+ const std::string devPath = subdev_->devicePath();
+
+ /* Try to get ID from firmware description. */
+ id_ = sysfs::firmwareNodePath(devPath);
+ if (!id_.empty())
+ return 0;
+
+ /*
+ * Virtual sensors not described in firmware
+ *
+ * Verify it's a platform device and construct ID from the device path
+ * and model of sensor.
+ */
+ if (devPath.find("/sys/devices/platform/", 0) == 0) {
+ id_ = devPath.substr(strlen("/sys/devices/")) + " " + model();
+ return 0;
+ }
+
+ LOG(CameraSensor, Error) << "Can't generate sensor ID";
+ return -EINVAL;
+}
+
int CameraSensor::validateSensorDriver()
{
int err = 0;
@@ -217,6 +264,26 @@ int CameraSensor::validateSensorDriver()
}
/*
+ * Verify if sensor supports horizontal/vertical flips
+ *
+ * \todo Handle horizontal and vertical flips independently.
+ */
+ const struct v4l2_query_ext_ctrl *hflipInfo = subdev_->controlInfo(V4L2_CID_HFLIP);
+ const struct v4l2_query_ext_ctrl *vflipInfo = subdev_->controlInfo(V4L2_CID_VFLIP);
+ if (hflipInfo && !(hflipInfo->flags & V4L2_CTRL_FLAG_READ_ONLY) &&
+ vflipInfo && !(vflipInfo->flags & V4L2_CTRL_FLAG_READ_ONLY)) {
+ supportFlips_ = true;
+
+ if (hflipInfo->flags & V4L2_CTRL_FLAG_MODIFY_LAYOUT ||
+ vflipInfo->flags & V4L2_CTRL_FLAG_MODIFY_LAYOUT)
+ flipsAlterBayerOrder_ = true;
+ }
+
+ if (!supportFlips_)
+ LOG(CameraSensor, Debug)
+ << "Camera sensor does not support horizontal/vertical flip";
+
+ /*
* Make sure the required selection targets are supported.
*
* Failures in reading any of the targets are not deemed to be fatal,
@@ -275,6 +342,7 @@ int CameraSensor::validateSensorDriver()
* required by the CameraSensor class.
*/
static constexpr uint32_t mandatoryControls[] = {
+ V4L2_CID_ANALOGUE_GAIN,
V4L2_CID_EXPOSURE,
V4L2_CID_HBLANK,
V4L2_CID_PIXEL_RATE,
@@ -384,18 +452,18 @@ int CameraSensor::initProperties()
/* Retrieve and register properties from the kernel interface. */
const ControlInfoMap &controls = subdev_->controls();
- int32_t propertyValue;
const auto &orientation = controls.find(V4L2_CID_CAMERA_ORIENTATION);
if (orientation != controls.end()) {
int32_t v4l2Orientation = orientation->second.def().get<int32_t>();
+ int32_t propertyValue;
switch (v4l2Orientation) {
default:
LOG(CameraSensor, Warning)
<< "Unsupported camera location "
<< v4l2Orientation << ", setting to External";
- /* Fall-through */
+ [[fallthrough]];
case V4L2_CAMERA_ORIENTATION_EXTERNAL:
propertyValue = properties::CameraLocationExternal;
break;
@@ -413,8 +481,27 @@ int CameraSensor::initProperties()
const auto &rotationControl = controls.find(V4L2_CID_CAMERA_SENSOR_ROTATION);
if (rotationControl != controls.end()) {
- propertyValue = rotationControl->second.def().get<int32_t>();
+ int32_t propertyValue = rotationControl->second.def().get<int32_t>();
+
+ /*
+ * Cache the Transform associated with the camera mounting
+ * rotation for later use in computeTransform().
+ */
+ bool success;
+ mountingOrientation_ = orientationFromRotation(propertyValue, &success);
+ if (!success) {
+ LOG(CameraSensor, Warning)
+ << "Invalid rotation of " << propertyValue
+ << " degrees - ignoring";
+ mountingOrientation_ = Orientation::Rotate0;
+ }
+
properties_.set(properties::Rotation, propertyValue);
+ } else {
+ LOG(CameraSensor, Warning)
+ << "Rotation control not available, default to 0 degrees";
+ properties_.set(properties::Rotation, 0);
+ mountingOrientation_ = Orientation::Rotate0;
}
properties_.set(properties::PixelArraySize, pixelArraySize_);
@@ -467,8 +554,8 @@ int CameraSensor::discoverAncillaryDevices()
ret = focusLens_->init();
if (ret) {
LOG(CameraSensor, Error)
- << "CameraLens initialisation failed";
- return ret;
+ << "Lens initialisation failed, lens disabled";
+ focusLens_.reset();
}
break;
@@ -510,6 +597,21 @@ int CameraSensor::discoverAncillaryDevices()
*/
/**
+ * \fn CameraSensor::device()
+ * \brief Retrieve the camera sensor device
+ * \todo Remove this function by integrating DelayedControl with CameraSensor
+ * \return The camera sensor device
+ */
+
+/**
+ * \fn CameraSensor::focusLens()
+ * \brief Retrieve the focus lens controller
+ *
+ * \return The focus lens controller. nullptr if no focus lens controller is
+ * connected to the sensor
+ */
+
+/**
* \fn CameraSensor::mbusCodes()
* \brief Retrieve the media bus codes supported by the camera sensor
*
@@ -562,64 +664,6 @@ Size CameraSensor::resolution() const
}
/**
- * \fn CameraSensor::testPatternModes()
- * \brief Retrieve all the supported test pattern modes of the camera sensor
- * The test pattern mode values correspond to the controls::TestPattern control.
- *
- * \return The list of test pattern modes
- */
-
-/**
- * \brief Set the test pattern mode for the camera sensor
- * \param[in] mode The test pattern mode
- *
- * The new \a mode is applied to the sensor if it differs from the active test
- * pattern mode. Otherwise, this function is a no-op. Setting the same test
- * pattern mode for every frame thus incurs no performance penalty.
- */
-int CameraSensor::setTestPatternMode(controls::draft::TestPatternModeEnum mode)
-{
- if (testPatternMode_ == mode)
- return 0;
-
- if (testPatternModes_.empty()) {
- LOG(CameraSensor, Error)
- << "Camera sensor does not support test pattern modes.";
- return -EINVAL;
- }
-
- return applyTestPatternMode(mode);
-}
-
-int CameraSensor::applyTestPatternMode(controls::draft::TestPatternModeEnum mode)
-{
- if (testPatternModes_.empty())
- return 0;
-
- auto it = std::find(testPatternModes_.begin(), testPatternModes_.end(),
- mode);
- if (it == testPatternModes_.end()) {
- LOG(CameraSensor, Error) << "Unsupported test pattern mode "
- << mode;
- return -EINVAL;
- }
-
- LOG(CameraSensor, Debug) << "Apply test pattern mode " << mode;
-
- int32_t index = staticProps_->testPatternModes.at(mode);
- ControlList ctrls{ controls() };
- ctrls.set(V4L2_CID_TEST_PATTERN, index);
-
- int ret = setControls(&ctrls);
- if (ret)
- return ret;
-
- testPatternMode_ = mode;
-
- return 0;
-}
-
-/**
* \brief Retrieve the best sensor format for a desired output
* \param[in] mbusCodes The list of acceptable media bus codes
* \param[in] size The desired size
@@ -699,7 +743,7 @@ V4L2SubdeviceFormat CameraSensor::getFormat(const std::vector<unsigned int> &mbu
}
V4L2SubdeviceFormat format{
- .mbus_code = bestCode,
+ .code = bestCode,
.size = *bestSize,
.colorSpace = ColorSpace::Raw,
};
@@ -710,96 +754,143 @@ V4L2SubdeviceFormat CameraSensor::getFormat(const std::vector<unsigned int> &mbu
/**
* \brief Set the sensor output format
* \param[in] format The desired sensor output format
+ * \param[in] transform The transform to be applied on the sensor.
+ * Defaults to Identity.
+ *
+ * If flips are writable they are configured according to the desired Transform.
+ * Transform::Identity always corresponds to H/V flip being disabled if the
+ * controls are writable. Flips are set before the new format is applied as
+ * they can effectively change the Bayer pattern ordering.
*
* The ranges of any controls associated with the sensor are also updated.
*
* \return 0 on success or a negative error code otherwise
*/
-int CameraSensor::setFormat(V4L2SubdeviceFormat *format)
+int CameraSensor::setFormat(V4L2SubdeviceFormat *format, Transform transform)
{
+ /* Configure flips if the sensor supports that. */
+ if (supportFlips_) {
+ ControlList flipCtrls(subdev_->controls());
+
+ flipCtrls.set(V4L2_CID_HFLIP,
+ static_cast<int32_t>(!!(transform & Transform::HFlip)));
+ flipCtrls.set(V4L2_CID_VFLIP,
+ static_cast<int32_t>(!!(transform & Transform::VFlip)));
+
+ int ret = subdev_->setControls(&flipCtrls);
+ if (ret)
+ return ret;
+ }
+
+ /* Apply format on the subdev. */
int ret = subdev_->setFormat(pad_, format);
if (ret)
return ret;
- updateControlInfo();
+ subdev_->updateControlInfo();
return 0;
}
/**
- * \brief Retrieve the supported V4L2 controls and their information
- *
- * Control information is updated automatically to reflect the current sensor
- * configuration when the setFormat() function is called, without invalidating
- * any iterator on the ControlInfoMap. A manual update can also be forced by
- * calling the updateControlInfo() function for pipeline handlers that change
- * the sensor configuration wihtout using setFormat().
- *
- * \return A map of the V4L2 controls supported by the sensor
- */
-const ControlInfoMap &CameraSensor::controls() const
-{
- return subdev_->controls();
-}
-
-/**
- * \brief Read V4L2 controls from the sensor
- * \param[in] ids The list of controls to read, specified by their ID
- *
- * This function reads the value of all controls contained in \a ids, and
- * returns their values as a ControlList. The control identifiers are defined by
- * the V4L2 specification (V4L2_CID_*).
+ * \brief Try the sensor output format
+ * \param[in] format The desired sensor output format
*
- * If any control in \a ids is not supported by the device, is disabled (i.e.
- * has the V4L2_CTRL_FLAG_DISABLED flag set), or if any other error occurs
- * during validation of the requested controls, no control is read and this
- * function returns an empty control list.
+ * The ranges of any controls associated with the sensor are not updated.
*
- * \sa V4L2Device::getControls()
+ * \todo Add support for Transform by changing the format's Bayer ordering
+ * before calling subdev_->setFormat().
*
- * \return The control values in a ControlList on success, or an empty list on
- * error
+ * \return 0 on success or a negative error code otherwise
*/
-ControlList CameraSensor::getControls(const std::vector<uint32_t> &ids)
+int CameraSensor::tryFormat(V4L2SubdeviceFormat *format) const
{
- return subdev_->getControls(ids);
+ return subdev_->setFormat(pad_, format,
+ V4L2Subdevice::Whence::TryFormat);
}
/**
- * \brief Write V4L2 controls to the sensor
- * \param[in] ctrls The list of controls to write
+ * \brief Apply a sensor configuration to the camera sensor
+ * \param[in] config The sensor configuration
+ * \param[in] transform The transform to be applied on the sensor.
+ * Defaults to Identity
+ * \param[out] sensorFormat Format applied to the sensor (optional)
*
- * This function writes the value of all controls contained in \a ctrls, and
- * stores the values actually applied to the device in the corresponding \a
- * ctrls entry. The control identifiers are defined by the V4L2 specification
- * (V4L2_CID_*).
+ * Apply to the camera sensor the configuration \a config.
*
- * If any control in \a ctrls is not supported by the device, is disabled (i.e.
- * has the V4L2_CTRL_FLAG_DISABLED flag set), is read-only, or if any other
- * error occurs during validation of the requested controls, no control is
- * written and this function returns -EINVAL.
+ * \todo The configuration shall be fully populated and if any of the fields
+ * specified cannot be applied exactly, an error code is returned.
*
- * If an error occurs while writing the controls, the index of the first
- * control that couldn't be written is returned. All controls below that index
- * are written and their values are updated in \a ctrls, while all other
- * controls are not written and their values are not changed.
- *
- * \sa V4L2Device::setControls()
- *
- * \return 0 on success or an error code otherwise
- * \retval -EINVAL One of the control is not supported or not accessible
- * \retval i The index of the control that failed
+ * \return 0 if \a config is applied correctly to the camera sensor, a negative
+ * error code otherwise
*/
-int CameraSensor::setControls(ControlList *ctrls)
+int CameraSensor::applyConfiguration(const SensorConfiguration &config,
+ Transform transform,
+ V4L2SubdeviceFormat *sensorFormat)
{
- return subdev_->setControls(ctrls);
-}
+ if (!config.isValid()) {
+ LOG(CameraSensor, Error) << "Invalid sensor configuration";
+ return -EINVAL;
+ }
-/**
- * \fn CameraSensor::device()
- * \brief Retrieve the camera sensor device
- * \todo Remove this function by integrating DelayedControl with CameraSensor
- * \return The camera sensor device
- */
+ std::vector<unsigned int> filteredCodes;
+ std::copy_if(mbusCodes_.begin(), mbusCodes_.end(),
+ std::back_inserter(filteredCodes),
+ [&config](unsigned int mbusCode) {
+ BayerFormat bayer = BayerFormat::fromMbusCode(mbusCode);
+ if (bayer.bitDepth == config.bitDepth)
+ return true;
+ return false;
+ });
+ if (filteredCodes.empty()) {
+ LOG(CameraSensor, Error)
+ << "Cannot find any format with bit depth "
+ << config.bitDepth;
+ return -EINVAL;
+ }
+
+ /*
+ * Compute the sensor's data frame size by applying the cropping
+ * rectangle, subsampling and output crop to the sensor's pixel array
+ * size.
+ *
+ * \todo The actual size computation is for now ignored and only the
+ * output size is considered. This implies that resolutions obtained
+ * with two different cropping/subsampling will look identical and
+ * only the first found one will be considered.
+ */
+ V4L2SubdeviceFormat subdevFormat = {};
+ for (unsigned int code : filteredCodes) {
+ for (const Size &size : sizes(code)) {
+ if (size.width != config.outputSize.width ||
+ size.height != config.outputSize.height)
+ continue;
+
+ subdevFormat.code = code;
+ subdevFormat.size = size;
+ break;
+ }
+ }
+ if (!subdevFormat.code) {
+ LOG(CameraSensor, Error) << "Invalid output size in sensor configuration";
+ return -EINVAL;
+ }
+
+ int ret = setFormat(&subdevFormat, transform);
+ if (ret)
+ return ret;
+
+ /*
+ * Return to the caller the format actually applied to the sensor.
+ * This is relevant if transform has changed the bayer pattern order.
+ */
+ if (sensorFormat)
+ *sensorFormat = subdevFormat;
+
+ /* \todo Handle AnalogCrop. Most sensors do not support set_selection */
+ /* \todo Handle scaling in the digital domain. */
+
+ return 0;
+}
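Editorial note: a hedged sketch of how a pipeline handler might drive this new entry point from its configure() step; the bit depth and output size below are example values only.

    /* Sketch: apply an application-provided sensor configuration. */
    SensorConfiguration config;
    config.bitDepth = 10;
    config.outputSize = { 1920, 1080 };

    V4L2SubdeviceFormat sensorFormat;
    int ret = sensor->applyConfiguration(config, transform, &sensorFormat);
    if (ret)
            return ret;
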
/**
* \fn CameraSensor::properties()
@@ -819,10 +910,6 @@ int CameraSensor::setControls(ControlList *ctrls)
* Sensor information is only available for raw sensors. When called for a YUV
* sensor, this function returns -EINVAL.
*
- * Pipeline handlers that do not change the sensor format using the setFormat()
- * function may need to call updateControlInfo() beforehand, to ensure all the
- * control ranges are up to date.
- *
* \return 0 on success, a negative error code otherwise
*/
int CameraSensor::sensorInfo(IPACameraSensorInfo *info) const
@@ -866,9 +953,13 @@ int CameraSensor::sensorInfo(IPACameraSensorInfo *info) const
ret = subdev_->getFormat(pad_, &format);
if (ret)
return ret;
- info->bitsPerPixel = format.bitsPerPixel();
+
+ info->bitsPerPixel = MediaBusFormatInfo::info(format.code).bitsPerPixel;
info->outputSize = format.size;
+ std::optional<int32_t> cfa = properties_.get(properties::draft::ColorFilterArrangement);
+ info->cfaPattern = cfa ? *cfa : properties::draft::RGB;
+
/*
* Retrieve the pixel rate, line length and minimum/maximum frame
* duration through V4L2 controls. Support for the V4L2_CID_PIXEL_RATE,
@@ -883,10 +974,12 @@ int CameraSensor::sensorInfo(IPACameraSensorInfo *info) const
return -EINVAL;
}
- int32_t hblank = ctrls.get(V4L2_CID_HBLANK).get<int32_t>();
- info->lineLength = info->outputSize.width + hblank;
info->pixelRate = ctrls.get(V4L2_CID_PIXEL_RATE).get<int64_t>();
+ const ControlInfo hblank = ctrls.infoMap()->at(V4L2_CID_HBLANK);
+ info->minLineLength = info->outputSize.width + hblank.min().get<int32_t>();
+ info->maxLineLength = info->outputSize.width + hblank.max().get<int32_t>();
+
const ControlInfo vblank = ctrls.infoMap()->at(V4L2_CID_VBLANK);
info->minFrameLength = info->outputSize.height + vblank.min().get<int32_t>();
info->maxFrameLength = info->outputSize.height + vblank.max().get<int32_t>();
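Editorial note: with the line length now reported as a range, IPA modules can derive frame duration limits directly from the sensor info. A minimal sketch of that arithmetic, assuming pixelRate is in pixels per second so the results are in seconds.

    /* Sketch: frame duration bounds from IPACameraSensorInfo. */
    double minFrameDuration =
            double(info.minLineLength) * info.minFrameLength / info.pixelRate;
    double maxFrameDuration =
            double(info.maxLineLength) * info.maxFrameLength / info.pixelRate;
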
@@ -895,50 +988,220 @@ int CameraSensor::sensorInfo(IPACameraSensorInfo *info) const
}
/**
- * \fn void CameraSensor::updateControlInfo()
- * \brief Update the sensor's ControlInfoMap in case they have changed
- * \sa V4L2Device::updateControlInfo()
+ * \brief Compute the Transform that gives the requested \a orientation
+ * \param[inout] orientation The desired image orientation
+ *
+ * This function computes the Transform that the pipeline handler should apply
+ * to the CameraSensor to obtain the requested \a orientation.
+ *
+ * The intended caller of this function is the validate() implementation of
+ * pipeline handlers, that pass in the application requested
+ * CameraConfiguration::orientation and obtain a Transform to apply to the
+ * camera sensor, likely at configure() time.
+ *
+ * If the requested \a orientation cannot be obtained, the \a orientation
+ * parameter is adjusted to report the current image orientation and
+ * Transform::Identity is returned.
+ *
+ * If the requested \a orientation can be obtained, the function computes a
+ * Transform and does not adjust \a orientation.
+ *
+ * Pipeline handlers are expected to verify if \a orientation has been
+ * adjusted by this function and set the CameraConfiguration::status to
+ * Adjusted accordingly.
+ *
+ * \return A Transform instance that applied to the CameraSensor produces images
+ * with \a orientation
*/
-void CameraSensor::updateControlInfo()
+Transform CameraSensor::computeTransform(Orientation *orientation) const
{
- subdev_->updateControlInfo();
+ /*
+ * If we cannot do any flips we cannot change the native camera mounting
+ * orientation.
+ */
+ if (!supportFlips_) {
+ *orientation = mountingOrientation_;
+ return Transform::Identity;
+ }
+
+ /*
+ * Now compute the required transform to obtain 'orientation' starting
+ * from the mounting rotation.
+ *
+ * As a note:
+ * orientation / mountingOrientation_ = transform
+ * mountingOrientation_ * transform = orientation
+ */
+ Transform transform = *orientation / mountingOrientation_;
+
+ /*
+ * If transform contains any Transpose we cannot do it, so adjust
+ * 'orientation' to report the image native orientation and return Identity.
+ */
+ if (!!(transform & Transform::Transpose)) {
+ *orientation = mountingOrientation_;
+ return Transform::Identity;
+ }
+
+ return transform;
}
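Editorial note: the intended call pattern in a pipeline handler's validate()/configure() pair, reduced to a sketch; member names such as status and sensorFormat are illustrative.

    /* Sketch: honour the requested orientation, or report the adjustment. */
    Orientation requested = orientation;
    Transform transform = sensor->computeTransform(&orientation);
    if (orientation != requested)
            status = Adjusted;

    /* At configure() time, apply the transform together with the format. */
    sensor->setFormat(&sensorFormat, transform);
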
/**
- * \fn CameraSensor::focusLens()
- * \brief Retrieve the focus lens controller
+ * \brief Compute the Bayer order that results from the given Transform
+ * \param[in] t The Transform to apply to the sensor
*
- * \return The focus lens controller. nullptr if no focus lens controller is
- * connected to the sensor
+ * Some sensors change their Bayer order when they are h-flipped or v-flipped.
+ * This function computes and returns the Bayer order that would result from the
+ * given transform applied to the sensor.
+ *
+ * This function is valid only when the sensor produces raw Bayer formats.
+ *
+ * \return The Bayer order produced by the sensor when the Transform is applied
*/
+BayerFormat::Order CameraSensor::bayerOrder(Transform t) const
+{
+ /* Return a defined but meaningless value for non-Bayer sensors. */
+ if (!bayerFormat_)
+ return BayerFormat::Order::BGGR;
-std::string CameraSensor::logPrefix() const
+ if (!flipsAlterBayerOrder_)
+ return bayerFormat_->order;
+
+ /*
+ * Apply the transform to the native (i.e. untransformed) Bayer order,
+ * using the rest of the Bayer format supplied by the caller.
+ */
+ return bayerFormat_->transform(t).order;
+}
+
+/**
+ * \brief Retrieve the supported V4L2 controls and their information
+ *
+ * Control information is updated automatically to reflect the current sensor
+ * configuration when the setFormat() function is called, without invalidating
+ * any iterator on the ControlInfoMap.
+ *
+ * \return A map of the V4L2 controls supported by the sensor
+ */
+const ControlInfoMap &CameraSensor::controls() const
{
- return "'" + entity_->name() + "'";
+ return subdev_->controls();
}
-int CameraSensor::generateId()
+/**
+ * \brief Read V4L2 controls from the sensor
+ * \param[in] ids The list of controls to read, specified by their ID
+ *
+ * This function reads the value of all controls contained in \a ids, and
+ * returns their values as a ControlList. The control identifiers are defined by
+ * the V4L2 specification (V4L2_CID_*).
+ *
+ * If any control in \a ids is not supported by the device, is disabled (i.e.
+ * has the V4L2_CTRL_FLAG_DISABLED flag set), or if any other error occurs
+ * during validation of the requested controls, no control is read and this
+ * function returns an empty control list.
+ *
+ * \sa V4L2Device::getControls()
+ *
+ * \return The control values in a ControlList on success, or an empty list on
+ * error
+ */
+ControlList CameraSensor::getControls(const std::vector<uint32_t> &ids)
{
- const std::string devPath = subdev_->devicePath();
+ return subdev_->getControls(ids);
+}
- /* Try to get ID from firmware description. */
- id_ = sysfs::firmwareNodePath(devPath);
- if (!id_.empty())
+/**
+ * \brief Write V4L2 controls to the sensor
+ * \param[in] ctrls The list of controls to write
+ *
+ * This function writes the value of all controls contained in \a ctrls, and
+ * stores the values actually applied to the device in the corresponding \a
+ * ctrls entry. The control identifiers are defined by the V4L2 specification
+ * (V4L2_CID_*).
+ *
+ * If any control in \a ctrls is not supported by the device, is disabled (i.e.
+ * has the V4L2_CTRL_FLAG_DISABLED flag set), is read-only, or if any other
+ * error occurs during validation of the requested controls, no control is
+ * written and this function returns -EINVAL.
+ *
+ * If an error occurs while writing the controls, the index of the first
+ * control that couldn't be written is returned. All controls below that index
+ * are written and their values are updated in \a ctrls, while all other
+ * controls are not written and their values are not changed.
+ *
+ * \sa V4L2Device::setControls()
+ *
+ * \return 0 on success or an error code otherwise
+ * \retval -EINVAL One of the control is not supported or not accessible
+ * \retval i The index of the control that failed
+ */
+int CameraSensor::setControls(ControlList *ctrls)
+{
+ return subdev_->setControls(ctrls);
+}
+
+/**
+ * \fn CameraSensor::testPatternModes()
+ * \brief Retrieve all the supported test pattern modes of the camera sensor
+ * The test pattern mode values correspond to the controls::TestPattern control.
+ *
+ * \return The list of test pattern modes
+ */
+
+/**
+ * \brief Set the test pattern mode for the camera sensor
+ * \param[in] mode The test pattern mode
+ *
+ * The new \a mode is applied to the sensor if it differs from the active test
+ * pattern mode. Otherwise, this function is a no-op. Setting the same test
+ * pattern mode for every frame thus incurs no performance penalty.
+ */
+int CameraSensor::setTestPatternMode(controls::draft::TestPatternModeEnum mode)
+{
+ if (testPatternMode_ == mode)
return 0;
- /*
- * Virtual sensors not described in firmware
- *
- * Verify it's a platform device and construct ID from the device path
- * and model of sensor.
- */
- if (devPath.find("/sys/devices/platform/", 0) == 0) {
- id_ = devPath.substr(strlen("/sys/devices/")) + " " + model();
+ if (testPatternModes_.empty()) {
+ LOG(CameraSensor, Error)
+ << "Camera sensor does not support test pattern modes.";
+ return -EINVAL;
+ }
+
+ return applyTestPatternMode(mode);
+}
+
+int CameraSensor::applyTestPatternMode(controls::draft::TestPatternModeEnum mode)
+{
+ if (testPatternModes_.empty())
return 0;
+
+ auto it = std::find(testPatternModes_.begin(), testPatternModes_.end(),
+ mode);
+ if (it == testPatternModes_.end()) {
+ LOG(CameraSensor, Error) << "Unsupported test pattern mode "
+ << mode;
+ return -EINVAL;
}
- LOG(CameraSensor, Error) << "Can't generate sensor ID";
- return -EINVAL;
+ LOG(CameraSensor, Debug) << "Apply test pattern mode " << mode;
+
+ int32_t index = staticProps_->testPatternModes.at(mode);
+ ControlList ctrls{ controls() };
+ ctrls.set(V4L2_CID_TEST_PATTERN, index);
+
+ int ret = setControls(&ctrls);
+ if (ret)
+ return ret;
+
+ testPatternMode_ = mode;
+
+ return 0;
+}
+
+std::string CameraSensor::logPrefix() const
+{
+ return "'" + entity_->name() + "'";
}
} /* namespace libcamera */
diff --git a/src/libcamera/camera_sensor_properties.cpp b/src/libcamera/sensor/camera_sensor_properties.cpp
index e5f27f06..b18524d8 100644
--- a/src/libcamera/camera_sensor_properties.cpp
+++ b/src/libcamera/sensor/camera_sensor_properties.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * camera_sensor_properties.cpp - Database of camera sensor properties
+ * Database of camera sensor properties
*/
#include "libcamera/internal/camera_sensor_properties.h"
@@ -52,6 +52,15 @@ LOG_DEFINE_CATEGORY(CameraSensorProperties)
const CameraSensorProperties *CameraSensorProperties::get(const std::string &sensor)
{
static const std::map<std::string, const CameraSensorProperties> sensorProps = {
+ { "ar0521", {
+ .unitCellSize = { 2200, 2200 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeSolidColor, 1 },
+ { controls::draft::TestPatternModeColorBars, 2 },
+ { controls::draft::TestPatternModeColorBarsFadeToGray, 3 },
+ },
+ } },
{ "hi846", {
.unitCellSize = { 1120, 1120 },
.testPatternModes = {
@@ -90,6 +99,10 @@ const CameraSensorProperties *CameraSensorProperties::get(const std::string &sen
{ controls::draft::TestPatternModePn9, 4 },
},
} },
+ { "imx283", {
+ .unitCellSize = { 2400, 2400 },
+ .testPatternModes = {},
+ } },
{ "imx290", {
.unitCellSize = { 2900, 2900 },
.testPatternModes = {},
@@ -98,10 +111,58 @@ const CameraSensorProperties *CameraSensorProperties::get(const std::string &sen
.unitCellSize = { 3450, 3450 },
.testPatternModes = {},
} },
+ { "imx327", {
+ .unitCellSize = { 2900, 2900 },
+ .testPatternModes = {},
+ } },
+ { "imx335", {
+ .unitCellSize = { 2000, 2000 },
+ .testPatternModes = {},
+ } },
+ { "imx415", {
+ .unitCellSize = { 1450, 1450 },
+ .testPatternModes = {},
+ } },
{ "imx477", {
.unitCellSize = { 1550, 1550 },
.testPatternModes = {},
} },
+ { "imx519", {
+ .unitCellSize = { 1220, 1220 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeSolidColor, 2 },
+ { controls::draft::TestPatternModePn9, 4 },
+ /*
+ * The driver reports ColorBars and ColorBarsFadeToGray as well but
+ * these two patterns do not comply with MIPI CCS v1.1 (Section 10.1).
+ */
+ },
+ } },
+ { "imx708", {
+ .unitCellSize = { 1400, 1400 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 1 },
+ { controls::draft::TestPatternModeSolidColor, 2 },
+ { controls::draft::TestPatternModeColorBarsFadeToGray, 3 },
+ { controls::draft::TestPatternModePn9, 4 },
+ },
+ } },
+ { "ov2685", {
+ .unitCellSize = { 1750, 1750 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 1},
+ { controls::draft::TestPatternModeColorBarsFadeToGray, 2 },
+ /*
+ * No corresponding test pattern mode for:
+ * 3: "Random Data"
+ * 4: "Black White Square"
+ * 5: "Color Square"
+ */
+ },
+ } },
{ "ov2740", {
.unitCellSize = { 1400, 1400 },
.testPatternModes = {
@@ -109,6 +170,19 @@ const CameraSensorProperties *CameraSensorProperties::get(const std::string &sen
{ controls::draft::TestPatternModeColorBars, 1},
},
} },
+ { "ov4689", {
+ .unitCellSize = { 2000, 2000 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 1},
+ { controls::draft::TestPatternModeColorBarsFadeToGray, 2},
+ /*
+ * No corresponding test patterns in
+ * MIPI CCS specification for sensor's
+ * colorBarType2 and colorBarType3.
+ */
+ },
+ } },
{ "ov5640", {
.unitCellSize = { 1400, 1400 },
.testPatternModes = {
@@ -146,6 +220,32 @@ const CameraSensorProperties *CameraSensorProperties::get(const std::string &sen
*/
},
} },
+ { "ov64a40", {
+ .unitCellSize = { 1008, 1008 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 1 },
+ { controls::draft::TestPatternModeColorBarsFadeToGray, 2 },
+ /*
+ * No corresponding test pattern mode
+ * 3: "Vertical Color Bar Type 3",
+ * 4: "Vertical Color Bar Type 4"
+ */
+ },
+ } },
+ { "ov8858", {
+ .unitCellSize = { 1120, 1120 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 1 },
+ { controls::draft::TestPatternModeColorBarsFadeToGray, 2 },
+ /*
+ * No corresponding test pattern mode
+ * 3: "Vertical Color Bar Type 3",
+ * 4: "Vertical Color Bar Type 4"
+ */
+ },
+ } },
{ "ov8865", {
.unitCellSize = { 1400, 1400 },
.testPatternModes = {
diff --git a/src/libcamera/sensor/meson.build b/src/libcamera/sensor/meson.build
new file mode 100644
index 00000000..bf4b131a
--- /dev/null
+++ b/src/libcamera/sensor/meson.build
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_sources += files([
+ 'camera_sensor.cpp',
+ 'camera_sensor_properties.cpp',
+])
diff --git a/src/libcamera/shared_mem_object.cpp b/src/libcamera/shared_mem_object.cpp
new file mode 100644
index 00000000..809fbdaf
--- /dev/null
+++ b/src/libcamera/shared_mem_object.cpp
@@ -0,0 +1,242 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023 Raspberry Pi Ltd
+ * Copyright (C) 2024 Andrei Konovalov
+ * Copyright (C) 2024 Dennis Bonke
+ * Copyright (C) 2024 Ideas on Board Oy
+ *
+ * Helpers for shared memory allocations
+ */
+
+#include "libcamera/internal/shared_mem_object.h"
+
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+/**
+ * \file shared_mem_object.cpp
+ * \brief Helpers for shared memory allocations
+ */
+
+namespace libcamera {
+
+/**
+ * \class SharedMem
+ * \brief Helper class to allocate and manage memory shareable between processes
+ *
+ * SharedMem manages memory suitable for sharing between processes. When an
+ * instance is constructed, it allocates a memory buffer of the requested size
+ * backed by an anonymous file, using the memfd API.
+ *
+ * The allocated memory is exposed by the mem() function. If memory allocation
+ * fails, the function returns an empty Span. This can be also checked using the
+ * bool() operator.
+ *
+ * The file descriptor for the backing file is exposed as a SharedFD by the fd()
+ * function. It can be shared with other processes across IPC boundaries, which
+ * can then map the memory with mmap().
+ *
+ * A single memfd is created for every SharedMem. If there is a need to allocate
+ * a large number of objects in shared memory, these objects should be grouped
+ * together and use the shared memory allocated by a single SharedMem object if
+ * possible. This will help to minimize the number of created memfd's.
+ */
+
+SharedMem::SharedMem() = default;
+
+/**
+ * \brief Construct a SharedMem with memory of the given \a size
+ * \param[in] name Name of the SharedMem
+ * \param[in] size Size of the shared memory to allocate and map
+ *
+ * The \a name is used for debugging purpose only. Multiple SharedMem instances
+ * can have the same name.
+ */
+SharedMem::SharedMem(const std::string &name, std::size_t size)
+{
+#if HAVE_MEMFD_CREATE
+ int fd = memfd_create(name.c_str(), MFD_CLOEXEC);
+#else
+ int fd = syscall(SYS_memfd_create, name.c_str(), MFD_CLOEXEC);
+#endif
+ if (fd < 0)
+ return;
+
+ fd_ = SharedFD(std::move(fd));
+ if (!fd_.isValid())
+ return;
+
+ if (ftruncate(fd_.get(), size) < 0) {
+ fd_ = SharedFD();
+ return;
+ }
+
+ void *mem = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ fd_.get(), 0);
+ if (mem == MAP_FAILED) {
+ fd_ = SharedFD();
+ return;
+ }
+
+ mem_ = { static_cast<uint8_t *>(mem), size };
+}
+
+/**
+ * \brief Move constructor for SharedMem
+ * \param[in] rhs The object to move
+ */
+SharedMem::SharedMem(SharedMem &&rhs)
+{
+ this->fd_ = std::move(rhs.fd_);
+ this->mem_ = rhs.mem_;
+ rhs.mem_ = {};
+}
+
+/**
+ * \brief Destroy the SharedMem instance
+ *
+ * Destroying an instance invalidates the memory mapping exposed with mem().
+ * Other mappings of the backing file, created in this or other processes with
+ * mmap(), remain valid.
+ *
+ * Similarly, other references to the backing file descriptor created by copying
+ * the SharedFD returned by fd() remain valid. The underlying memory will be
+ * freed only when all file descriptors that reference the anonymous file get
+ * closed.
+ */
+SharedMem::~SharedMem()
+{
+ if (!mem_.empty())
+ munmap(mem_.data(), mem_.size_bytes());
+}
+
+/**
+ * \brief Move assignment operator for SharedMem
+ * \param[in] rhs The object to move
+ */
+SharedMem &SharedMem::operator=(SharedMem &&rhs)
+{
+ this->fd_ = std::move(rhs.fd_);
+ this->mem_ = rhs.mem_;
+ rhs.mem_ = {};
+ return *this;
+}
+
+/**
+ * \fn const SharedFD &SharedMem::fd() const
+ * \brief Retrieve the file descriptor for the underlying shared memory
+ * \return The file descriptor, or an invalid SharedFD if allocation failed
+ */
+
+/**
+ * \fn Span<uint8_t> SharedMem::mem() const
+ * \brief Retrieve the underlying shared memory
+ * \return The memory buffer, or an empty Span if allocation failed
+ */
+
+/**
+ * \fn SharedMem::operator bool()
+ * \brief Check if the shared memory allocation succeeded
+ * \return True if allocation of the shared memory succeeded, false otherwise
+ */
+
+/**
+ * \class SharedMemObject
+ * \brief Helper class to allocate an object in shareable memory
+ * \tparam T The object type
+ *
+ * The SharedMemObject class is a specialization of the SharedMem class that
+ * wraps an object of type \a T and constructs it in shareable memory. It uses
+ * the same underlying memory allocation and sharing mechanism as the SharedMem
+ * class.
+ *
+ * The wrapped object is constructed at the same time as the SharedMemObject
+ * instance, by forwarding the arguments passed to the SharedMemObject
+ * constructor. The underlying memory allocation is sized to the object \a T
+ * size. The bool() operator should be used to check the allocation was
+ * successful. The object can be accessed using the dereference operators
+ * operator*() and operator->().
+ *
+ * While no restriction on the type \a T is enforced, not all types are suitable
+ * for sharing between multiple processes. Most notably, any object type that
+ * contains pointer or reference members will likely cause issues. Even if those
+ * members refer to other members of the same object, the shared memory will be
+ * mapped at different addresses in different processes, and the pointers will
+ * not be valid.
+ *
+ * A new anonymous file is created for every SharedMemObject instance. If there
+ * is a need to share a large number of small objects, these objects should be
+ * grouped into a single larger object to limit the number of file descriptors.
+ *
+ * To share the object with other processes, see the SharedMem documentation.
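+ *
+ * A minimal usage sketch (illustrative only; FrameStats is a hypothetical
+ * trivially-copyable type, not part of this patch):
+ *
+ * \code{.cpp}
+ * struct FrameStats {
+ * 	uint32_t frameCount;
+ * 	uint32_t histogram[16];
+ * };
+ *
+ * SharedMemObject<FrameStats> stats("frame-stats");
+ * if (!stats)
+ * 	return -ENOMEM;
+ *
+ * stats->frameCount = 0;
+ * // stats.fd() can be shared with another process, which maps kSize bytes.
+ * \endcode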
+ */
+
+/**
+ * \var SharedMemObject::kSize
+ * \brief The size of the object stored in shared memory
+ */
+
+/**
+ * \fn SharedMemObject::SharedMemObject(const std::string &name, Args &&...args)
+ * \brief Construct a SharedMemObject
+ * \param[in] name Name of the SharedMemObject
+ * \param[in] args Arguments to pass to the constructor of the object T
+ *
+ * The \a name is used for debugging purposes only. Multiple SharedMem instances
+ * can have the same name.
+ */
+
+/**
+ * \fn SharedMemObject::SharedMemObject(SharedMemObject<T> &&rhs)
+ * \brief Move constructor for SharedMemObject
+ * \param[in] rhs The object to move
+ */
+
+/**
+ * \fn SharedMemObject::~SharedMemObject()
+ * \brief Destroy the SharedMemObject instance
+ *
+ * Destroying a SharedMemObject calls the wrapped T object's destructor. While
+ * the underlying memory may not be freed immediately if other mappings have
+ * been created manually (see SharedMem::~SharedMem() for more information), the
+ * stored object may be modified. Depending on the ~T() destructor, accessing
+ * the object after destruction of the SharedMemObject causes undefined
+ * behaviour. It is the responsibility of the user of this class to synchronize
+ * with other users who have access to the shared object.
+ */
+
+/**
+ * \fn SharedMemObject::operator=(SharedMemObject<T> &&rhs)
+ * \brief Move assignment operator for SharedMemObject
+ * \param[in] rhs The SharedMemObject object to take the data from
+ *
+ * Moving a SharedMemObject does not affect the stored object.
+ */
+
+/**
+ * \fn SharedMemObject::operator->()
+ * \brief Dereference the stored object
+ * \return Pointer to the stored object
+ */
+
+/**
+ * \fn const T *SharedMemObject::operator->() const
+ * \copydoc SharedMemObject::operator->
+ */
+
+/**
+ * \fn SharedMemObject::operator*()
+ * \brief Dereference the stored object
+ * \return Reference to the stored object
+ */
+
+/**
+ * \fn const T &SharedMemObject::operator*() const
+ * \copydoc SharedMemObject::operator*
+ */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/software_isp/TODO b/src/libcamera/software_isp/TODO
new file mode 100644
index 00000000..4fcee39b
--- /dev/null
+++ b/src/libcamera/software_isp/TODO
@@ -0,0 +1,279 @@
+1. Setting F_SEAL_SHRINK and F_SEAL_GROW after ftruncate()
+
+>> SharedMem::SharedMem(const std::string &name, std::size_t size)
+>> : name_(name), size_(size), mem_(nullptr)
+>>
+>> ...
+>>
+>> if (ftruncate(fd_.get(), size_) < 0)
+>> return;
+>
+> Should we set the GROW and SHRINK seals (in a separate patch) ?
+
+Yes, this can be done.
+Setting F_SEAL_SHRINK and F_SEAL_GROW after the ftruncate() call above could catch
+some potential errors related to improper access to the shared memory allocated by
+the SharedMemObject.
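+
+A possible shape for this (sketch only; note that sealing also requires creating
+the memfd with MFD_ALLOW_SEALING in addition to MFD_CLOEXEC):
+
+  if (fcntl(fd_.get(), F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK) < 0) {
+          fd_ = SharedFD();
+          return;
+  }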
+
+---
+
+2. Reconsider stats sharing
+
+>>> +void SwStatsCpu::finishFrame(void)
+>>> +{
+>>> + *sharedStats_ = stats_;
+>>
+>> Is it more efficient to copy the stats instead of operating directly on
+>> the shared memory ?
+>
+> I inherited doing things this way from Andrey. I kept this because
+> we don't really have any synchronization with the IPA reading this.
+>
+> So the idea is to only touch this when the next set of statistics
+> is ready since we don't know when the IPA is done with accessing
+> the previous set of statistics ...
+>
+> This is both something which seems mostly a theoretic problem,
+> yet also definitely something which I think we need to fix.
+>
+> Maybe use a ringbuffer of stats buffers and pass the index into
+> the ringbuffer to the emit signal ?
+
+That would match how we deal with hardware ISPs, and I think that's a
+good idea. It will help decouple the processing side from the IPA.
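+
+A sketch of what the ringbuffer variant could look like (illustrative only,
+assuming sharedStats_ becomes an array of stats buffers and statsReady carries
+the buffer index):
+
+  void SwStatsCpu::finishFrame()
+  {
+          *sharedStats_[statsIndex_] = stats_;
+          statsReady.emit(statsIndex_);
+          statsIndex_ = (statsIndex_ + 1) % kNumStatsBuffers;
+  }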
+
+---
+
+3. Remove statsReady signal
+
+> class SwStatsCpu
+> {
+> /**
+> * \brief Signals that the statistics are ready
+> */
+> Signal<> statsReady;
+
+But better, I wonder if the signal could be dropped completely. The
+SwStatsCpu class does not operate asynchronously. Shouldn't whoever
+calls the finishFrame() function then handle emitting the signal ?
+
+Now, the trouble is that this would be the DebayerCpu class, whose name
+doesn't mark it as a prime candidate to handle stats. However, it
+already exposes a getStatsFD() function, so we're already calling for
+trouble :-) Either that should be moved to somewhere else, or the class
+should be renamed. Considering that the class applies colour gains in
+addition to performing the interpolation, it may be more of a naming
+issue.
+
+Removing the signal and refactoring those classes doesn't have to be
+addressed now, I think it would be part of a larger refactoring
+(possibly also considering platforms that have no ISP but can produce
+stats in hardware, such as the i.MX7), but please keep it on your radar.
+
+---
+
+4. Hide internal representation of gains from callers
+
+> struct DebayerParams {
+> static constexpr unsigned int kGain10 = 256;
+
+Forcing the caller to deal with the internal representation of gains
+isn't nice, especially given that it precludes implementing gains of
+different precisions in different backends. Wouldn't it be better to pass
+the values as floating point numbers, and convert them to the internal
+representation in the implementation of process() before using them ?
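+
+A sketch of the conversion this would imply inside process() (illustrative only,
+with DebayerParams carrying float gains where 1.0 means unity):
+
+  unsigned int gainR = std::lround(params.gainR * DebayerParams::kGain10);
+  unsigned int gainG = std::lround(params.gainG * DebayerParams::kGain10);
+  unsigned int gainB = std::lround(params.gainB * DebayerParams::kGain10);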
+
+---
+
+5. Store ISP parameters in per-frame buffers
+
+> /**
+> * \fn void Debayer::process(FrameBuffer *input, FrameBuffer *output, DebayerParams params)
+> * \brief Process the bayer data into the requested format.
+> * \param[in] input The input buffer.
+> * \param[in] output The output buffer.
+> * \param[in] params The parameters to be used in debayering.
+> *
+> * \note DebayerParams is passed by value deliberately so that a copy is passed
+> * when this is run in another thread by invokeMethod().
+> */
+
+Possibly something to address later, by storing ISP parameters in
+per-frame buffers like we do for hardware ISPs.
+
+---
+
+6. Input buffer copying configuration
+
+> DebayerCpu::DebayerCpu(std::unique_ptr<SwStatsCpu> stats)
+> : stats_(std::move(stats)), gammaCorrection_(1.0)
+> {
+> enableInputMemcpy_ = true;
+
+Set this appropriately and/or make it configurable.
+
+---
+
+7. Performance measurement configuration
+
+> void DebayerCpu::process(FrameBuffer *input, FrameBuffer *output, DebayerParams params)
+> /* Measure before emitting signals */
+> if (measuredFrames_ < DebayerCpu::kLastFrameToMeasure &&
+> ++measuredFrames_ > DebayerCpu::kFramesToSkip) {
+> timespec frameEndTime = {};
+> clock_gettime(CLOCK_MONOTONIC_RAW, &frameEndTime);
+> frameProcessTime_ += timeDiff(frameEndTime, frameStartTime);
+> if (measuredFrames_ == DebayerCpu::kLastFrameToMeasure) {
+> const unsigned int measuredFrames = DebayerCpu::kLastFrameToMeasure -
+> DebayerCpu::kFramesToSkip;
+> LOG(Debayer, Info)
+> << "Processed " << measuredFrames
+> << " frames in " << frameProcessTime_ / 1000 << "us, "
+> << frameProcessTime_ / (1000 * measuredFrames)
+> << " us/frame";
+> }
+> }
+
+I wonder if there would be a way to control at runtime when/how to
+perform those measurements. Maybe that's a bit overkill.
+
+---
+
+8. DebayerCpu cleanups
+
+> >> class DebayerCpu : public Debayer, public Object
+> >> const SharedFD &getStatsFD() { return stats_->getStatsFD(); }
+> >
+> > This,
+>
+> Note the statistics pass-through stuff is sort of a necessary evil
+> since we want one main loop going over the data line by line and
+> doing both debayering as well as stats while the line is still
+> hot in the l2 cache. And things like the process2() and process4()
+> loops are highly CPU debayering specific so I don't think we should
+> move those out of the CpuDebayer code.
+
+Yes, that I understood from the review. "necessary evil" is indeed the
+right term :-) I expect it will take quite some design skills to balance
+the need for performance and the need for a maintainable architecture.
+
+> > plus the fact that this class handles colour gains and gamma,
+> > makes me thing we have either a naming issue, or an architecture issue.
+>
+> I agree that this does a bit more then debayering, although
+> the debayering really is the main thing it does.
+>
+> I guess the calculation of the rgb lookup tables which do the
+> color gains and gamma could be moved outside of this class,
+> that might even be beneficial for GPU based debayering assuming
+> that that is going to use rgb lookup tables too (it could
+> implement actual color gains + gamma correction in some different
+> way).
+>
+> I think this falls under the lets wait until we have a GPU
+> based SoftISP MVP/POC and then do some refactoring to see which
+> bits should go where.
+
+---
+
+9. Decouple pipeline and IPA naming
+
+> The current src/ipa/meson.build assumes the IPA name to match the
+> pipeline name. For this reason "-Dipas=simple" is used for the
+> Soft IPA module.
+
+This should be addressed.
+
+---
+
+10. Doxyfile cleanup
+
+>> diff --git a/Documentation/Doxyfile.in b/Documentation/Doxyfile.in
+>> index a86ea6c1..2be8d47b 100644
+>> --- a/Documentation/Doxyfile.in
+>> +++ b/Documentation/Doxyfile.in
+>> @@ -44,6 +44,7 @@ EXCLUDE = @TOP_SRCDIR@/include/libcamera/base/span.h \
+>> @TOP_SRCDIR@/src/libcamera/pipeline/ \
+>> @TOP_SRCDIR@/src/libcamera/tracepoints.cpp \
+>> @TOP_BUILDDIR@/include/libcamera/internal/tracepoints.h \
+>> + @TOP_BUILDDIR@/include/libcamera/ipa/soft_ipa_interface.h \
+> Why is this needed ?
+>
+>> @TOP_BUILDDIR@/src/libcamera/proxy/
+>> EXCLUDE_PATTERNS = @TOP_BUILDDIR@/include/libcamera/ipa/*_serializer.h \
+>> diff --git a/include/libcamera/ipa/meson.build b/include/libcamera/ipa/meson.build
+>> index f3b4881c..3352d08f 100644
+>> --- a/include/libcamera/ipa/meson.build
+>> +++ b/include/libcamera/ipa/meson.build
+>> @@ -65,6 +65,7 @@ pipeline_ipa_mojom_mapping = {
+>> 'ipu3': 'ipu3.mojom',
+>> 'rkisp1': 'rkisp1.mojom',
+>> 'rpi/vc4': 'raspberrypi.mojom',
+>> + 'simple': 'soft.mojom',
+>> 'vimc': 'vimc.mojom',
+>> }
+>> diff --git a/include/libcamera/ipa/soft.mojom b/include/libcamera/ipa/soft.mojom
+>> new file mode 100644
+>> index 00000000..c249bd75
+>> --- /dev/null
+>> +++ b/include/libcamera/ipa/soft.mojom
+>> @@ -0,0 +1,28 @@
+>> +/* SPDX-License-Identifier: LGPL-2.1-or-later */
+>> +
+>> +/*
+>> + * \todo Document the interface and remove the related EXCLUDE_PATTERNS entry.
+> Ah that's why.
+
+Yes, because, well... all the other IPAs were doing that...
+
+> It doesn't have to be done before merging, but could you
+> address this sooner than later ?
+
+---
+
+11. Switch to libipa/algorithm.h API in processStats
+
+>> void IPASoftSimple::processStats(const ControlList &sensorControls)
+>>
+> Do you envision switching to the libipa/algorithm.h API at some point ?
+
+At some point, yes.
+
+---
+
+12. Improve handling of the sensor controls which take effect with a delay
+
+> void IPASoftSimple::processStats(const ControlList &sensorControls)
+> {
+> ...
+> /*
+> * AE / AGC, use 2 frames delay to make sure that the exposure and
+> * the gain set have applied to the camera sensor.
+> */
+> if (ignore_updates_ > 0) {
+> --ignore_updates_;
+> return;
+> }
+
+This could be handled better with DelayedControls.
+
+---
+
+13. Use DelayedControls class in ispStatsReady()
+
+> void SimpleCameraData::ispStatsReady()
+> {
+> swIsp_->processStats(sensor_->getControls({ V4L2_CID_ANALOGUE_GAIN,
+> V4L2_CID_EXPOSURE }));
+
+You should use the DelayedControls class.
+
+---
+
+14. Improve black level and colour gains application
+
+I think the black level should eventually be moved before debayering, and
+ideally the colour gains as well. I understand the need for optimizations to
+lower the CPU consumption, but at the same time I don't feel comfortable
+building up on top of an implementation that may work a bit more by chance than
+by correctness, as that's not very maintainable.
diff --git a/src/libcamera/software_isp/debayer.cpp b/src/libcamera/software_isp/debayer.cpp
new file mode 100644
index 00000000..efe75ea8
--- /dev/null
+++ b/src/libcamera/software_isp/debayer.cpp
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ * Copyright (C) 2023, Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * debayer base class
+ */
+
+#include "debayer.h"
+
+namespace libcamera {
+
+/**
+ * \struct DebayerParams
+ * \brief Struct to hold the debayer parameters.
+ */
+
+/**
+ * \var DebayerParams::kGain10
+ * \brief Constant value for 1.0 gain
+ */
+
+/**
+ * \var DebayerParams::gainR
+ * \brief Red gain
+ *
+ * 128 = 0.5, 256 = 1.0, 512 = 2.0, etc.
+ */
+
+/**
+ * \var DebayerParams::gainG
+ * \brief Green gain
+ *
+ * 128 = 0.5, 256 = 1.0, 512 = 2.0, etc.
+ */
+
+/**
+ * \var DebayerParams::gainB
+ * \brief Blue gain
+ *
+ * 128 = 0.5, 256 = 1.0, 512 = 2.0, etc.
+ */
+
+/**
+ * \var DebayerParams::gamma
+ * \brief Gamma correction, 1.0 is no correction
+ */
+
+/**
+ * \class Debayer
+ * \brief Base debayering class
+ *
+ * Base class that provides functions for setting up the debayering process.
+ */
+
+LOG_DEFINE_CATEGORY(Debayer)
+
+Debayer::~Debayer()
+{
+}
+
+/**
+ * \fn int Debayer::configure(const StreamConfiguration &inputCfg, const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs)
+ * \brief Configure the debayer object according to the passed in parameters.
+ * \param[in] inputCfg The input configuration.
+ * \param[in] outputCfgs The output configurations.
+ *
+ * \return 0 on success, a negative errno on failure.
+ */
+
+/**
+ * \fn Size Debayer::patternSize(PixelFormat inputFormat)
+ * \brief Get the width and height at which the bayer pattern repeats.
+ * \param[in] inputFormat The input format.
+ *
+ * Valid sizes are: 2x2, 4x2 or 4x4.
+ *
+ * \return Pattern size or an empty size for unsupported inputFormats.
+ */
+
+/**
+ * \fn std::vector<PixelFormat> Debayer::formats(PixelFormat inputFormat)
+ * \brief Get the supported output formats.
+ * \param[in] inputFormat The input format.
+ *
+ * \return All supported output formats or an empty vector if there are none.
+ */
+
+/**
+ * \fn std::tuple<unsigned int, unsigned int> Debayer::strideAndFrameSize(const PixelFormat &outputFormat, const Size &size)
+ * \brief Get the stride and the frame size.
+ * \param[in] outputFormat The output format.
+ * \param[in] size The output size.
+ *
+ * \return A tuple of the stride and the frame size, or a tuple with 0,0 if
+ * there is no valid output config.
+ */
+
+/**
+ * \fn void Debayer::process(FrameBuffer *input, FrameBuffer *output, DebayerParams params)
+ * \brief Process the bayer data into the requested format.
+ * \param[in] input The input buffer.
+ * \param[in] output The output buffer.
+ * \param[in] params The parameters to be used in debayering.
+ *
+ * \note DebayerParams is passed by value deliberately so that a copy is passed
+ * when this is run in another thread by invokeMethod().
+ */
+
+/**
+ * \fn virtual SizeRange Debayer::sizes(PixelFormat inputFormat, const Size &inputSize)
+ * \brief Get the supported output sizes for the given input format and size.
+ * \param[in] inputFormat The input format.
+ * \param[in] inputSize The input size.
+ *
+ * \return The valid size ranges or an empty range if there are none.
+ */
+
+/**
+ * \var Signal<FrameBuffer *> Debayer::inputBufferReady
+ * \brief Signals when the input buffer is ready.
+ */
+
+/**
+ * \var Signal<FrameBuffer *> Debayer::outputBufferReady
+ * \brief Signals when the output buffer is ready.
+ */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/software_isp/debayer.h b/src/libcamera/software_isp/debayer.h
new file mode 100644
index 00000000..c151fe5d
--- /dev/null
+++ b/src/libcamera/software_isp/debayer.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ * Copyright (C) 2023, Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * debayering base class
+ */
+
+#pragma once
+
+#include <stdint.h>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/signal.h>
+
+#include <libcamera/geometry.h>
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/software_isp/debayer_params.h"
+
+namespace libcamera {
+
+class FrameBuffer;
+
+LOG_DECLARE_CATEGORY(Debayer)
+
+class Debayer
+{
+public:
+ virtual ~Debayer() = 0;
+
+ virtual int configure(const StreamConfiguration &inputCfg,
+ const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs) = 0;
+
+ virtual std::vector<PixelFormat> formats(PixelFormat inputFormat) = 0;
+
+ virtual std::tuple<unsigned int, unsigned int>
+ strideAndFrameSize(const PixelFormat &outputFormat, const Size &size) = 0;
+
+ virtual void process(FrameBuffer *input, FrameBuffer *output, DebayerParams params) = 0;
+
+ virtual SizeRange sizes(PixelFormat inputFormat, const Size &inputSize) = 0;
+
+ Signal<FrameBuffer *> inputBufferReady;
+ Signal<FrameBuffer *> outputBufferReady;
+
+private:
+ virtual Size patternSize(PixelFormat inputFormat) = 0;
+};
+
+} /* namespace libcamera */
diff --git a/src/libcamera/software_isp/debayer_cpu.cpp b/src/libcamera/software_isp/debayer_cpu.cpp
new file mode 100644
index 00000000..8254bbe9
--- /dev/null
+++ b/src/libcamera/software_isp/debayer_cpu.cpp
@@ -0,0 +1,807 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ * Copyright (C) 2023, Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * CPU based debayering class
+ */
+
+#include "debayer_cpu.h"
+
+#include <math.h>
+#include <stdlib.h>
+#include <time.h>
+
+#include <libcamera/formats.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/framebuffer.h"
+#include "libcamera/internal/mapped_framebuffer.h"
+
+namespace libcamera {
+
+/**
+ * \class DebayerCpu
+ * \brief Class for debayering on the CPU
+ *
+ * Implementation for CPU based debayering
+ */
+
+/**
+ * \brief Constructs a DebayerCpu object
+ * \param[in] stats Pointer to the stats object to use
+ */
+DebayerCpu::DebayerCpu(std::unique_ptr<SwStatsCpu> stats)
+ : stats_(std::move(stats)), gammaCorrection_(1.0), blackLevel_(0)
+{
+ /*
+ * Reading from uncached buffers may be very slow.
+ * In such a case, it's better to copy input buffer data to normal memory.
+ * But in case of cached buffers, copying the data is unnecessary overhead.
+	 * enableInputMemcpy_ makes this behavior configurable. At the moment, we
+	 * always set it to true as the safer choice, but this should be changed in
+	 * the future.
+ */
+ enableInputMemcpy_ = true;
+
+ /* Initialize gamma to 1.0 curve */
+ for (unsigned int i = 0; i < kGammaLookupSize; i++)
+ gamma_[i] = i / (kGammaLookupSize / kRGBLookupSize);
+
+ for (unsigned int i = 0; i < kMaxLineBuffers; i++)
+ lineBuffers_[i] = nullptr;
+}
+
+DebayerCpu::~DebayerCpu()
+{
+ for (unsigned int i = 0; i < kMaxLineBuffers; i++)
+ free(lineBuffers_[i]);
+}
+
+#define DECLARE_SRC_POINTERS(pixel_t) \
+ const pixel_t *prev = (const pixel_t *)src[0] + xShift_; \
+ const pixel_t *curr = (const pixel_t *)src[1] + xShift_; \
+ const pixel_t *next = (const pixel_t *)src[2] + xShift_;
+
+/*
+ * RGR
+ * GBG
+ * RGR
+ */
+#define BGGR_BGR888(p, n, div) \
+ *dst++ = blue_[curr[x] / (div)]; \
+ *dst++ = green_[(prev[x] + curr[x - p] + curr[x + n] + next[x]) / (4 * (div))]; \
+ *dst++ = red_[(prev[x - p] + prev[x + n] + next[x - p] + next[x + n]) / (4 * (div))]; \
+ x++;
+
+/*
+ * GBG
+ * RGR
+ * GBG
+ */
+#define GRBG_BGR888(p, n, div) \
+ *dst++ = blue_[(prev[x] + next[x]) / (2 * (div))]; \
+ *dst++ = green_[curr[x] / (div)]; \
+ *dst++ = red_[(curr[x - p] + curr[x + n]) / (2 * (div))]; \
+ x++;
+
+/*
+ * GRG
+ * BGB
+ * GRG
+ */
+#define GBRG_BGR888(p, n, div) \
+ *dst++ = blue_[(curr[x - p] + curr[x + n]) / (2 * (div))]; \
+ *dst++ = green_[curr[x] / (div)]; \
+ *dst++ = red_[(prev[x] + next[x]) / (2 * (div))]; \
+ x++;
+
+/*
+ * BGB
+ * GRG
+ * BGB
+ */
+#define RGGB_BGR888(p, n, div) \
+ *dst++ = blue_[(prev[x - p] + prev[x + n] + next[x - p] + next[x + n]) / (4 * (div))]; \
+ *dst++ = green_[(prev[x] + curr[x - p] + curr[x + n] + next[x]) / (4 * (div))]; \
+ *dst++ = red_[curr[x] / (div)]; \
+ x++;
+
+void DebayerCpu::debayer8_BGBG_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ DECLARE_SRC_POINTERS(uint8_t)
+
+ for (int x = 0; x < (int)window_.width;) {
+ BGGR_BGR888(1, 1, 1)
+ GBRG_BGR888(1, 1, 1)
+ }
+}
+
+void DebayerCpu::debayer8_GRGR_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ DECLARE_SRC_POINTERS(uint8_t)
+
+ for (int x = 0; x < (int)window_.width;) {
+ GRBG_BGR888(1, 1, 1)
+ RGGB_BGR888(1, 1, 1)
+ }
+}
+
+void DebayerCpu::debayer10_BGBG_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ DECLARE_SRC_POINTERS(uint16_t)
+
+ for (int x = 0; x < (int)window_.width;) {
+ /* divide values by 4 for 10 -> 8 bpp value */
+ BGGR_BGR888(1, 1, 4)
+ GBRG_BGR888(1, 1, 4)
+ }
+}
+
+void DebayerCpu::debayer10_GRGR_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ DECLARE_SRC_POINTERS(uint16_t)
+
+ for (int x = 0; x < (int)window_.width;) {
+ /* divide values by 4 for 10 -> 8 bpp value */
+ GRBG_BGR888(1, 1, 4)
+ RGGB_BGR888(1, 1, 4)
+ }
+}
+
+void DebayerCpu::debayer12_BGBG_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ DECLARE_SRC_POINTERS(uint16_t)
+
+ for (int x = 0; x < (int)window_.width;) {
+ /* divide values by 16 for 12 -> 8 bpp value */
+ BGGR_BGR888(1, 1, 16)
+ GBRG_BGR888(1, 1, 16)
+ }
+}
+
+void DebayerCpu::debayer12_GRGR_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ DECLARE_SRC_POINTERS(uint16_t)
+
+ for (int x = 0; x < (int)window_.width;) {
+ /* divide values by 16 for 12 -> 8 bpp value */
+ GRBG_BGR888(1, 1, 16)
+ RGGB_BGR888(1, 1, 16)
+ }
+}
+
+void DebayerCpu::debayer10P_BGBG_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ const int widthInBytes = window_.width * 5 / 4;
+ const uint8_t *prev = src[0];
+ const uint8_t *curr = src[1];
+ const uint8_t *next = src[2];
+
+ /*
+	 * For the first pixel, getting a pixel from the previous column uses
+	 * x - 2 to skip the 5th byte with the least-significant bits for 4 pixels.
+	 * The same applies to the last pixel (x + 2) when looking at the next column.
+ */
+ for (int x = 0; x < widthInBytes;) {
+ /* First pixel */
+ BGGR_BGR888(2, 1, 1)
+ /* Second pixel BGGR -> GBRG */
+ GBRG_BGR888(1, 1, 1)
+ /* Same thing for third and fourth pixels */
+ BGGR_BGR888(1, 1, 1)
+ GBRG_BGR888(1, 2, 1)
+ /* Skip 5th src byte with 4 x 2 least-significant-bits */
+ x++;
+ }
+}
+
+void DebayerCpu::debayer10P_GRGR_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ const int widthInBytes = window_.width * 5 / 4;
+ const uint8_t *prev = src[0];
+ const uint8_t *curr = src[1];
+ const uint8_t *next = src[2];
+
+ for (int x = 0; x < widthInBytes;) {
+ /* First pixel */
+ GRBG_BGR888(2, 1, 1)
+ /* Second pixel GRBG -> RGGB */
+ RGGB_BGR888(1, 1, 1)
+ /* Same thing for third and fourth pixels */
+ GRBG_BGR888(1, 1, 1)
+ RGGB_BGR888(1, 2, 1)
+ /* Skip 5th src byte with 4 x 2 least-significant-bits */
+ x++;
+ }
+}
+
+void DebayerCpu::debayer10P_GBGB_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ const int widthInBytes = window_.width * 5 / 4;
+ const uint8_t *prev = src[0];
+ const uint8_t *curr = src[1];
+ const uint8_t *next = src[2];
+
+ for (int x = 0; x < widthInBytes;) {
+ /* Even pixel */
+ GBRG_BGR888(2, 1, 1)
+ /* Odd pixel GBGR -> BGGR */
+ BGGR_BGR888(1, 1, 1)
+ /* Same thing for next 2 pixels */
+ GBRG_BGR888(1, 1, 1)
+ BGGR_BGR888(1, 2, 1)
+ /* Skip 5th src byte with 4 x 2 least-significant-bits */
+ x++;
+ }
+}
+
+void DebayerCpu::debayer10P_RGRG_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ const int widthInBytes = window_.width * 5 / 4;
+ const uint8_t *prev = src[0];
+ const uint8_t *curr = src[1];
+ const uint8_t *next = src[2];
+
+ for (int x = 0; x < widthInBytes;) {
+ /* Even pixel */
+ RGGB_BGR888(2, 1, 1)
+ /* Odd pixel RGGB -> GRBG */
+ GRBG_BGR888(1, 1, 1)
+ /* Same thing for next 2 pixels */
+ RGGB_BGR888(1, 1, 1)
+ GRBG_BGR888(1, 2, 1)
+ /* Skip 5th src byte with 4 x 2 least-significant-bits */
+ x++;
+ }
+}
+
+static bool isStandardBayerOrder(BayerFormat::Order order)
+{
+ return order == BayerFormat::BGGR || order == BayerFormat::GBRG ||
+ order == BayerFormat::GRBG || order == BayerFormat::RGGB;
+}
+
+/*
+ * Setup the Debayer object according to the passed in parameters.
+ * Return 0 on success, a negative errno value on failure
+ * (unsupported parameters).
+ */
+int DebayerCpu::getInputConfig(PixelFormat inputFormat, DebayerInputConfig &config)
+{
+ BayerFormat bayerFormat =
+ BayerFormat::fromPixelFormat(inputFormat);
+
+ if ((bayerFormat.bitDepth == 8 || bayerFormat.bitDepth == 10 || bayerFormat.bitDepth == 12) &&
+ bayerFormat.packing == BayerFormat::Packing::None &&
+ isStandardBayerOrder(bayerFormat.order)) {
+ config.bpp = (bayerFormat.bitDepth + 7) & ~7;
+ config.patternSize.width = 2;
+ config.patternSize.height = 2;
+ config.outputFormats = std::vector<PixelFormat>({ formats::RGB888, formats::BGR888 });
+ return 0;
+ }
+
+ if (bayerFormat.bitDepth == 10 &&
+ bayerFormat.packing == BayerFormat::Packing::CSI2 &&
+ isStandardBayerOrder(bayerFormat.order)) {
+ config.bpp = 10;
+ config.patternSize.width = 4; /* 5 bytes per *4* pixels */
+ config.patternSize.height = 2;
+ config.outputFormats = std::vector<PixelFormat>({ formats::RGB888, formats::BGR888 });
+ return 0;
+ }
+
+ LOG(Debayer, Info)
+ << "Unsupported input format " << inputFormat.toString();
+ return -EINVAL;
+}
+
+int DebayerCpu::getOutputConfig(PixelFormat outputFormat, DebayerOutputConfig &config)
+{
+ if (outputFormat == formats::RGB888 || outputFormat == formats::BGR888) {
+ config.bpp = 24;
+ return 0;
+ }
+
+ LOG(Debayer, Info)
+ << "Unsupported output format " << outputFormat.toString();
+ return -EINVAL;
+}
+
+/*
+ * Check for standard Bayer orders and set xShift_ and swap debayer0/1, so that
+ * a single pair of BGGR debayer functions can be used for all 4 standard orders.
+ */
+int DebayerCpu::setupStandardBayerOrder(BayerFormat::Order order)
+{
+ switch (order) {
+ case BayerFormat::BGGR:
+ break;
+ case BayerFormat::GBRG:
+ xShift_ = 1; /* BGGR -> GBRG */
+ break;
+ case BayerFormat::GRBG:
+ std::swap(debayer0_, debayer1_); /* BGGR -> GRBG */
+ break;
+ case BayerFormat::RGGB:
+ xShift_ = 1; /* BGGR -> GBRG */
+ std::swap(debayer0_, debayer1_); /* GBRG -> RGGB */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int DebayerCpu::setDebayerFunctions(PixelFormat inputFormat, PixelFormat outputFormat)
+{
+ BayerFormat bayerFormat =
+ BayerFormat::fromPixelFormat(inputFormat);
+
+ xShift_ = 0;
+ swapRedBlueGains_ = false;
+
+ auto invalidFmt = []() -> int {
+ LOG(Debayer, Error) << "Unsupported input output format combination";
+ return -EINVAL;
+ };
+
+ switch (outputFormat) {
+ case formats::RGB888:
+ break;
+ case formats::BGR888:
+ /* Swap R and B in bayer order to generate BGR888 instead of RGB888 */
+ swapRedBlueGains_ = true;
+
+ switch (bayerFormat.order) {
+ case BayerFormat::BGGR:
+ bayerFormat.order = BayerFormat::RGGB;
+ break;
+ case BayerFormat::GBRG:
+ bayerFormat.order = BayerFormat::GRBG;
+ break;
+ case BayerFormat::GRBG:
+ bayerFormat.order = BayerFormat::GBRG;
+ break;
+ case BayerFormat::RGGB:
+ bayerFormat.order = BayerFormat::BGGR;
+ break;
+ default:
+ return invalidFmt();
+ }
+ break;
+ default:
+ return invalidFmt();
+ }
+
+ if ((bayerFormat.bitDepth == 8 || bayerFormat.bitDepth == 10 || bayerFormat.bitDepth == 12) &&
+ bayerFormat.packing == BayerFormat::Packing::None &&
+ isStandardBayerOrder(bayerFormat.order)) {
+ switch (bayerFormat.bitDepth) {
+ case 8:
+ debayer0_ = &DebayerCpu::debayer8_BGBG_BGR888;
+ debayer1_ = &DebayerCpu::debayer8_GRGR_BGR888;
+ break;
+ case 10:
+ debayer0_ = &DebayerCpu::debayer10_BGBG_BGR888;
+ debayer1_ = &DebayerCpu::debayer10_GRGR_BGR888;
+ break;
+ case 12:
+ debayer0_ = &DebayerCpu::debayer12_BGBG_BGR888;
+ debayer1_ = &DebayerCpu::debayer12_GRGR_BGR888;
+ break;
+ }
+ setupStandardBayerOrder(bayerFormat.order);
+ return 0;
+ }
+
+ if (bayerFormat.bitDepth == 10 &&
+ bayerFormat.packing == BayerFormat::Packing::CSI2) {
+ switch (bayerFormat.order) {
+ case BayerFormat::BGGR:
+ debayer0_ = &DebayerCpu::debayer10P_BGBG_BGR888;
+ debayer1_ = &DebayerCpu::debayer10P_GRGR_BGR888;
+ return 0;
+ case BayerFormat::GBRG:
+ debayer0_ = &DebayerCpu::debayer10P_GBGB_BGR888;
+ debayer1_ = &DebayerCpu::debayer10P_RGRG_BGR888;
+ return 0;
+ case BayerFormat::GRBG:
+ debayer0_ = &DebayerCpu::debayer10P_GRGR_BGR888;
+ debayer1_ = &DebayerCpu::debayer10P_BGBG_BGR888;
+ return 0;
+ case BayerFormat::RGGB:
+ debayer0_ = &DebayerCpu::debayer10P_RGRG_BGR888;
+ debayer1_ = &DebayerCpu::debayer10P_GBGB_BGR888;
+ return 0;
+ default:
+ break;
+ }
+ }
+
+ return invalidFmt();
+}
+
+int DebayerCpu::configure(const StreamConfiguration &inputCfg,
+ const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs)
+{
+ if (getInputConfig(inputCfg.pixelFormat, inputConfig_) != 0)
+ return -EINVAL;
+
+ if (stats_->configure(inputCfg) != 0)
+ return -EINVAL;
+
+ const Size &statsPatternSize = stats_->patternSize();
+ if (inputConfig_.patternSize.width != statsPatternSize.width ||
+ inputConfig_.patternSize.height != statsPatternSize.height) {
+ LOG(Debayer, Error)
+ << "mismatching stats and debayer pattern sizes for "
+ << inputCfg.pixelFormat.toString();
+ return -EINVAL;
+ }
+
+ inputConfig_.stride = inputCfg.stride;
+
+ if (outputCfgs.size() != 1) {
+ LOG(Debayer, Error)
+ << "Unsupported number of output streams: "
+ << outputCfgs.size();
+ return -EINVAL;
+ }
+
+ const StreamConfiguration &outputCfg = outputCfgs[0];
+ SizeRange outSizeRange = sizes(inputCfg.pixelFormat, inputCfg.size);
+ std::tie(outputConfig_.stride, outputConfig_.frameSize) =
+ strideAndFrameSize(outputCfg.pixelFormat, outputCfg.size);
+
+ if (!outSizeRange.contains(outputCfg.size) || outputConfig_.stride != outputCfg.stride) {
+ LOG(Debayer, Error)
+ << "Invalid output size/stride: "
+ << "\n " << outputCfg.size << " (" << outSizeRange << ")"
+ << "\n " << outputCfg.stride << " (" << outputConfig_.stride << ")";
+ return -EINVAL;
+ }
+
+ if (setDebayerFunctions(inputCfg.pixelFormat, outputCfg.pixelFormat) != 0)
+ return -EINVAL;
+
+ window_.x = ((inputCfg.size.width - outputCfg.size.width) / 2) &
+ ~(inputConfig_.patternSize.width - 1);
+ window_.y = ((inputCfg.size.height - outputCfg.size.height) / 2) &
+ ~(inputConfig_.patternSize.height - 1);
+ window_.width = outputCfg.size.width;
+ window_.height = outputCfg.size.height;
+
+ /* Don't pass x,y since process() already adjusts src before passing it */
+ stats_->setWindow(Rectangle(window_.size()));
+
+	/* Pad with patternSize.width on both the left and right sides */
+ lineBufferPadding_ = inputConfig_.patternSize.width * inputConfig_.bpp / 8;
+ lineBufferLength_ = window_.width * inputConfig_.bpp / 8 +
+ 2 * lineBufferPadding_;
+ for (unsigned int i = 0;
+ i < (inputConfig_.patternSize.height + 1) && enableInputMemcpy_;
+ i++) {
+ free(lineBuffers_[i]);
+ lineBuffers_[i] = (uint8_t *)malloc(lineBufferLength_);
+ if (!lineBuffers_[i])
+ return -ENOMEM;
+ }
+
+ measuredFrames_ = 0;
+ frameProcessTime_ = 0;
+
+ return 0;
+}
+
+/*
+ * Get width and height at which the bayer-pattern repeats.
+ * Return pattern-size or an empty Size for an unsupported inputFormat.
+ */
+Size DebayerCpu::patternSize(PixelFormat inputFormat)
+{
+ DebayerCpu::DebayerInputConfig config;
+
+ if (getInputConfig(inputFormat, config) != 0)
+ return {};
+
+ return config.patternSize;
+}
+
+std::vector<PixelFormat> DebayerCpu::formats(PixelFormat inputFormat)
+{
+ DebayerCpu::DebayerInputConfig config;
+
+ if (getInputConfig(inputFormat, config) != 0)
+ return std::vector<PixelFormat>();
+
+ return config.outputFormats;
+}
+
+std::tuple<unsigned int, unsigned int>
+DebayerCpu::strideAndFrameSize(const PixelFormat &outputFormat, const Size &size)
+{
+ DebayerCpu::DebayerOutputConfig config;
+
+ if (getOutputConfig(outputFormat, config) != 0)
+ return std::make_tuple(0, 0);
+
+ /* round up to multiple of 8 for 64 bits alignment */
+ unsigned int stride = (size.width * config.bpp / 8 + 7) & ~7;
+
+ return std::make_tuple(stride, stride * size.height);
+}
+
+void DebayerCpu::setupInputMemcpy(const uint8_t *linePointers[])
+{
+ const unsigned int patternHeight = inputConfig_.patternSize.height;
+
+ if (!enableInputMemcpy_)
+ return;
+
+ for (unsigned int i = 0; i < patternHeight; i++) {
+ memcpy(lineBuffers_[i], linePointers[i + 1] - lineBufferPadding_,
+ lineBufferLength_);
+ linePointers[i + 1] = lineBuffers_[i] + lineBufferPadding_;
+ }
+
+ /* Point lineBufferIndex_ to first unused lineBuffer */
+ lineBufferIndex_ = patternHeight;
+}
+
+void DebayerCpu::shiftLinePointers(const uint8_t *linePointers[], const uint8_t *src)
+{
+ const unsigned int patternHeight = inputConfig_.patternSize.height;
+
+ for (unsigned int i = 0; i < patternHeight; i++)
+ linePointers[i] = linePointers[i + 1];
+
+ linePointers[patternHeight] = src +
+ (patternHeight / 2) * (int)inputConfig_.stride;
+}
+
+void DebayerCpu::memcpyNextLine(const uint8_t *linePointers[])
+{
+ const unsigned int patternHeight = inputConfig_.patternSize.height;
+
+ if (!enableInputMemcpy_)
+ return;
+
+ memcpy(lineBuffers_[lineBufferIndex_], linePointers[patternHeight] - lineBufferPadding_,
+ lineBufferLength_);
+ linePointers[patternHeight] = lineBuffers_[lineBufferIndex_] + lineBufferPadding_;
+
+ lineBufferIndex_ = (lineBufferIndex_ + 1) % (patternHeight + 1);
+}
+
+void DebayerCpu::process2(const uint8_t *src, uint8_t *dst)
+{
+ unsigned int yEnd = window_.y + window_.height;
+ /* Holds [0] previous- [1] current- [2] next-line */
+ const uint8_t *linePointers[3];
+
+ /* Adjust src to top left corner of the window */
+ src += window_.y * inputConfig_.stride + window_.x * inputConfig_.bpp / 8;
+
+ /* [x] becomes [x - 1] after initial shiftLinePointers() call */
+ if (window_.y) {
+ linePointers[1] = src - inputConfig_.stride; /* previous-line */
+ linePointers[2] = src;
+ } else {
+ /* window_.y == 0, use the next line as prev line */
+ linePointers[1] = src + inputConfig_.stride;
+ linePointers[2] = src;
+ /* Last 2 lines also need special handling */
+ yEnd -= 2;
+ }
+
+ setupInputMemcpy(linePointers);
+
+ for (unsigned int y = window_.y; y < yEnd; y += 2) {
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ stats_->processLine0(y, linePointers);
+ (this->*debayer0_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ (this->*debayer1_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+ }
+
+ if (window_.y == 0) {
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ stats_->processLine0(yEnd, linePointers);
+ (this->*debayer0_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+
+ shiftLinePointers(linePointers, src);
+ /* next line may point outside of src, use prev. */
+ linePointers[2] = linePointers[0];
+ (this->*debayer1_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+ }
+}
+
+void DebayerCpu::process4(const uint8_t *src, uint8_t *dst)
+{
+ const unsigned int yEnd = window_.y + window_.height;
+ /*
+ * This holds pointers to [0] 2-lines-up [1] 1-line-up [2] current-line
+ * [3] 1-line-down [4] 2-lines-down.
+ */
+ const uint8_t *linePointers[5];
+
+ /* Adjust src to top left corner of the window */
+ src += window_.y * inputConfig_.stride + window_.x * inputConfig_.bpp / 8;
+
+ /* [x] becomes [x - 1] after initial shiftLinePointers() call */
+ linePointers[1] = src - 2 * inputConfig_.stride;
+ linePointers[2] = src - inputConfig_.stride;
+ linePointers[3] = src;
+ linePointers[4] = src + inputConfig_.stride;
+
+ setupInputMemcpy(linePointers);
+
+ for (unsigned int y = window_.y; y < yEnd; y += 4) {
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ stats_->processLine0(y, linePointers);
+ (this->*debayer0_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ (this->*debayer1_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ stats_->processLine2(y, linePointers);
+ (this->*debayer2_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ (this->*debayer3_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+ }
+}
+
+static inline int64_t timeDiff(timespec &after, timespec &before)
+{
+ return (after.tv_sec - before.tv_sec) * 1000000000LL +
+ (int64_t)after.tv_nsec - (int64_t)before.tv_nsec;
+}
+
+void DebayerCpu::process(FrameBuffer *input, FrameBuffer *output, DebayerParams params)
+{
+ timespec frameStartTime;
+
+ if (measuredFrames_ < DebayerCpu::kLastFrameToMeasure) {
+ frameStartTime = {};
+ clock_gettime(CLOCK_MONOTONIC_RAW, &frameStartTime);
+ }
+
+ /* Apply DebayerParams */
+ if (params.gamma != gammaCorrection_ || params.blackLevel != blackLevel_) {
+ const unsigned int blackIndex =
+ params.blackLevel * kGammaLookupSize / 256;
+ std::fill(gamma_.begin(), gamma_.begin() + blackIndex, 0);
+ const float divisor = kGammaLookupSize - blackIndex - 1.0;
+ for (unsigned int i = blackIndex; i < kGammaLookupSize; i++)
+ gamma_[i] = UINT8_MAX * powf((i - blackIndex) / divisor, params.gamma);
+
+ gammaCorrection_ = params.gamma;
+ blackLevel_ = params.blackLevel;
+ }
+
+ if (swapRedBlueGains_)
+ std::swap(params.gainR, params.gainB);
+
+ for (unsigned int i = 0; i < kRGBLookupSize; i++) {
+ constexpr unsigned int div =
+ kRGBLookupSize * DebayerParams::kGain10 / kGammaLookupSize;
+ unsigned int idx;
+
+ /* Apply gamma after gain! */
+ idx = std::min({ i * params.gainR / div, (kGammaLookupSize - 1) });
+ red_[i] = gamma_[idx];
+
+ idx = std::min({ i * params.gainG / div, (kGammaLookupSize - 1) });
+ green_[i] = gamma_[idx];
+
+ idx = std::min({ i * params.gainB / div, (kGammaLookupSize - 1) });
+ blue_[i] = gamma_[idx];
+ }
+
+ /* Copy metadata from the input buffer */
+ FrameMetadata &metadata = output->_d()->metadata();
+ metadata.status = input->metadata().status;
+ metadata.sequence = input->metadata().sequence;
+ metadata.timestamp = input->metadata().timestamp;
+
+ MappedFrameBuffer in(input, MappedFrameBuffer::MapFlag::Read);
+ MappedFrameBuffer out(output, MappedFrameBuffer::MapFlag::Write);
+ if (!in.isValid() || !out.isValid()) {
+ LOG(Debayer, Error) << "mmap-ing buffer(s) failed";
+ metadata.status = FrameMetadata::FrameError;
+ return;
+ }
+
+ stats_->startFrame();
+
+ if (inputConfig_.patternSize.height == 2)
+ process2(in.planes()[0].data(), out.planes()[0].data());
+ else
+ process4(in.planes()[0].data(), out.planes()[0].data());
+
+ metadata.planes()[0].bytesused = out.planes()[0].size();
+
+ /* Measure before emitting signals */
+ if (measuredFrames_ < DebayerCpu::kLastFrameToMeasure &&
+ ++measuredFrames_ > DebayerCpu::kFramesToSkip) {
+ timespec frameEndTime = {};
+ clock_gettime(CLOCK_MONOTONIC_RAW, &frameEndTime);
+ frameProcessTime_ += timeDiff(frameEndTime, frameStartTime);
+ if (measuredFrames_ == DebayerCpu::kLastFrameToMeasure) {
+ const unsigned int measuredFrames = DebayerCpu::kLastFrameToMeasure -
+ DebayerCpu::kFramesToSkip;
+ LOG(Debayer, Info)
+ << "Processed " << measuredFrames
+ << " frames in " << frameProcessTime_ / 1000 << "us, "
+ << frameProcessTime_ / (1000 * measuredFrames)
+ << " us/frame";
+ }
+ }
+
+ stats_->finishFrame();
+ outputBufferReady.emit(output);
+ inputBufferReady.emit(input);
+}
+
+SizeRange DebayerCpu::sizes(PixelFormat inputFormat, const Size &inputSize)
+{
+ Size patternSize = this->patternSize(inputFormat);
+ unsigned int borderHeight = patternSize.height;
+
+ if (patternSize.isNull())
+ return {};
+
+ /* No need for top/bottom border with a pattern height of 2 */
+ if (patternSize.height == 2)
+ borderHeight = 0;
+
+ /*
+ * For debayer interpolation a border is kept around the entire image
+ * and the minimum output size is pattern-height x pattern-width.
+ */
+ if (inputSize.width < (3 * patternSize.width) ||
+ inputSize.height < (2 * borderHeight + patternSize.height)) {
+ LOG(Debayer, Warning)
+ << "Input format size too small: " << inputSize.toString();
+ return {};
+ }
+
+ return SizeRange(Size(patternSize.width, patternSize.height),
+ Size((inputSize.width - 2 * patternSize.width) & ~(patternSize.width - 1),
+ (inputSize.height - 2 * borderHeight) & ~(patternSize.height - 1)),
+ patternSize.width, patternSize.height);
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/software_isp/debayer_cpu.h b/src/libcamera/software_isp/debayer_cpu.h
new file mode 100644
index 00000000..de216fe3
--- /dev/null
+++ b/src/libcamera/software_isp/debayer_cpu.h
@@ -0,0 +1,158 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ * Copyright (C) 2023, Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * CPU based debayering header
+ */
+
+#pragma once
+
+#include <memory>
+#include <stdint.h>
+#include <vector>
+
+#include <libcamera/base/object.h>
+
+#include "libcamera/internal/bayer_format.h"
+
+#include "debayer.h"
+#include "swstats_cpu.h"
+
+namespace libcamera {
+
+class DebayerCpu : public Debayer, public Object
+{
+public:
+ DebayerCpu(std::unique_ptr<SwStatsCpu> stats);
+ ~DebayerCpu();
+
+ int configure(const StreamConfiguration &inputCfg,
+ const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs);
+ Size patternSize(PixelFormat inputFormat);
+ std::vector<PixelFormat> formats(PixelFormat input);
+ std::tuple<unsigned int, unsigned int>
+ strideAndFrameSize(const PixelFormat &outputFormat, const Size &size);
+ void process(FrameBuffer *input, FrameBuffer *output, DebayerParams params);
+ SizeRange sizes(PixelFormat inputFormat, const Size &inputSize);
+
+ /**
+ * \brief Get the file descriptor for the statistics
+ *
+ * \return the file descriptor pointing to the statistics
+ */
+ const SharedFD &getStatsFD() { return stats_->getStatsFD(); }
+
+ /**
+ * \brief Get the output frame size
+ *
+ * \return The output frame size
+ */
+ unsigned int frameSize() { return outputConfig_.frameSize; }
+
+private:
+ /**
+ * \brief Called to debayer 1 line of Bayer input data to output format
+ * \param[out] dst Pointer to the start of the output line to write
+ * \param[in] src The input data
+ *
+ * Input data is an array of (patternSize_.height + 1) src
+ * pointers each pointing to a line in the Bayer source. The middle
+ * element of the array will point to the actual line being processed.
+ * Earlier element(s) will point to the previous line(s) and later
+ * element(s) to the next line(s).
+ *
+ * These functions take an array of src pointers, rather than
+ * a single src pointer + a stride for the source, so that when the src
+ * is slow uncached memory it can be copied to faster memory before
+ * debayering. Debayering a standard 2x2 Bayer pattern requires access
+ * to the previous and next src lines for interpolating the missing
+ * colors. To allow copying the src lines only once 3 temporary buffers
+ * each holding a single line are used, re-using the oldest buffer for
+ * the next line and the pointers are swizzled so that:
+	 * src[0] = previous-line, src[1] = current-line, src[2] = next-line.
+ * This way the 3 pointers passed to the debayer functions form
+ * a sliding window over the src avoiding the need to copy each
+ * line more than once.
+ *
+ * Similarly for bayer patterns which repeat every 4 lines, 5 src
+ * pointers are passed holding: src[0] = 2-lines-up, src[1] = 1-line-up
+ * src[2] = current-line, src[3] = 1-line-down, src[4] = 2-lines-down.
+ */
+ using debayerFn = void (DebayerCpu::*)(uint8_t *dst, const uint8_t *src[]);
+
+ /* 8-bit raw bayer format */
+ void debayer8_BGBG_BGR888(uint8_t *dst, const uint8_t *src[]);
+ void debayer8_GRGR_BGR888(uint8_t *dst, const uint8_t *src[]);
+ /* unpacked 10-bit raw bayer format */
+ void debayer10_BGBG_BGR888(uint8_t *dst, const uint8_t *src[]);
+ void debayer10_GRGR_BGR888(uint8_t *dst, const uint8_t *src[]);
+ /* unpacked 12-bit raw bayer format */
+ void debayer12_BGBG_BGR888(uint8_t *dst, const uint8_t *src[]);
+ void debayer12_GRGR_BGR888(uint8_t *dst, const uint8_t *src[]);
+ /* CSI-2 packed 10-bit raw bayer format (all the 4 orders) */
+ void debayer10P_BGBG_BGR888(uint8_t *dst, const uint8_t *src[]);
+ void debayer10P_GRGR_BGR888(uint8_t *dst, const uint8_t *src[]);
+ void debayer10P_GBGB_BGR888(uint8_t *dst, const uint8_t *src[]);
+ void debayer10P_RGRG_BGR888(uint8_t *dst, const uint8_t *src[]);
+
+ struct DebayerInputConfig {
+ Size patternSize;
+ unsigned int bpp; /* Memory used per pixel, not precision */
+ unsigned int stride;
+ std::vector<PixelFormat> outputFormats;
+ };
+
+ struct DebayerOutputConfig {
+ unsigned int bpp; /* Memory used per pixel, not precision */
+ unsigned int stride;
+ unsigned int frameSize;
+ };
+
+ int getInputConfig(PixelFormat inputFormat, DebayerInputConfig &config);
+ int getOutputConfig(PixelFormat outputFormat, DebayerOutputConfig &config);
+ int setupStandardBayerOrder(BayerFormat::Order order);
+ int setDebayerFunctions(PixelFormat inputFormat, PixelFormat outputFormat);
+ void setupInputMemcpy(const uint8_t *linePointers[]);
+ void shiftLinePointers(const uint8_t *linePointers[], const uint8_t *src);
+ void memcpyNextLine(const uint8_t *linePointers[]);
+ void process2(const uint8_t *src, uint8_t *dst);
+ void process4(const uint8_t *src, uint8_t *dst);
+
+ static constexpr unsigned int kGammaLookupSize = 1024;
+ static constexpr unsigned int kRGBLookupSize = 256;
+ /* Max. supported Bayer pattern height is 4, debayering this requires 5 lines */
+ static constexpr unsigned int kMaxLineBuffers = 5;
+
+ std::array<uint8_t, kGammaLookupSize> gamma_;
+ std::array<uint8_t, kRGBLookupSize> red_;
+ std::array<uint8_t, kRGBLookupSize> green_;
+ std::array<uint8_t, kRGBLookupSize> blue_;
+ debayerFn debayer0_;
+ debayerFn debayer1_;
+ debayerFn debayer2_;
+ debayerFn debayer3_;
+ Rectangle window_;
+ DebayerInputConfig inputConfig_;
+ DebayerOutputConfig outputConfig_;
+ std::unique_ptr<SwStatsCpu> stats_;
+ uint8_t *lineBuffers_[kMaxLineBuffers];
+ unsigned int lineBufferLength_;
+ unsigned int lineBufferPadding_;
+ unsigned int lineBufferIndex_;
+ unsigned int xShift_; /* Offset of 0/1 applied to window_.x */
+ bool enableInputMemcpy_;
+ bool swapRedBlueGains_;
+ float gammaCorrection_;
+ unsigned int blackLevel_;
+ unsigned int measuredFrames_;
+ int64_t frameProcessTime_;
+ /* Skip 30 frames for things to stabilize then measure 30 frames */
+ static constexpr unsigned int kFramesToSkip = 30;
+ static constexpr unsigned int kLastFrameToMeasure = 60;
+};
+
+} /* namespace libcamera */
diff --git a/src/libcamera/software_isp/meson.build b/src/libcamera/software_isp/meson.build
new file mode 100644
index 00000000..f7c66e28
--- /dev/null
+++ b/src/libcamera/software_isp/meson.build
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: CC0-1.0
+
+softisp_enabled = pipelines.contains('simple')
+summary({'SoftISP support' : softisp_enabled}, section : 'Configuration')
+
+if not softisp_enabled
+ subdir_done()
+endif
+
+libcamera_sources += files([
+ 'debayer.cpp',
+ 'debayer_cpu.cpp',
+ 'software_isp.cpp',
+ 'swstats_cpu.cpp',
+])
diff --git a/src/libcamera/software_isp/software_isp.cpp b/src/libcamera/software_isp/software_isp.cpp
new file mode 100644
index 00000000..c9b6be56
--- /dev/null
+++ b/src/libcamera/software_isp/software_isp.cpp
@@ -0,0 +1,357 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ *
+ * Simple software ISP implementation
+ */
+
+#include "libcamera/internal/software_isp/software_isp.h"
+
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <libcamera/formats.h>
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/framebuffer.h"
+#include "libcamera/internal/ipa_manager.h"
+#include "libcamera/internal/mapped_framebuffer.h"
+
+#include "debayer_cpu.h"
+
+/**
+ * \file software_isp.cpp
+ * \brief Simple software ISP implementation
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(SoftwareIsp)
+
+/**
+ * \class SoftwareIsp
+ * \brief Class for the Software ISP
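+ *
+ * A rough outline of how a pipeline handler is expected to drive this class
+ * (sketch only; signal connections and error handling are omitted):
+ *
+ * \code{.cpp}
+ * SoftwareIsp isp(pipe, sensor);
+ * if (!isp.isValid())
+ * 	return -ENODEV;
+ *
+ * isp.configure(inputCfg, { outputCfg }, sensor->controls());
+ * isp.exportBuffers(0, count, &buffers);
+ * isp.start();
+ *
+ * // For every raw frame from the sensor:
+ * isp.queueBuffers(rawBuffer, { { 0, outputBuffer } });
+ *
+ * isp.stop();
+ * \endcode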
+ */
+
+/**
+ * \var SoftwareIsp::inputBufferReady
+ * \brief A signal emitted when the input frame buffer completes
+ */
+
+/**
+ * \var SoftwareIsp::outputBufferReady
+ * \brief A signal emitted when the output frame buffer completes
+ */
+
+/**
+ * \var SoftwareIsp::ispStatsReady
+ * \brief A signal emitted when the statistics for IPA are ready
+ */
+
+/**
+ * \var SoftwareIsp::setSensorControls
+ * \brief A signal emitted when the values to write to the sensor controls are
+ * ready
+ */
+
+/**
+ * \brief Construct a SoftwareIsp object
+ * \param[in] pipe The pipeline handler in use
+ * \param[in] sensor Pointer to the CameraSensor instance owned by the pipeline
+ * handler
+ */
+SoftwareIsp::SoftwareIsp(PipelineHandler *pipe, const CameraSensor *sensor)
+ : debayerParams_{ DebayerParams::kGain10, DebayerParams::kGain10,
+ DebayerParams::kGain10, 0.5f, 0 },
+ dmaHeap_(DmaHeap::DmaHeapFlag::Cma | DmaHeap::DmaHeapFlag::System)
+{
+ if (!dmaHeap_.isValid()) {
+ LOG(SoftwareIsp, Error) << "Failed to create DmaHeap object";
+ return;
+ }
+
+ sharedParams_ = SharedMemObject<DebayerParams>("softIsp_params");
+ if (!sharedParams_) {
+ LOG(SoftwareIsp, Error) << "Failed to create shared memory for parameters";
+ return;
+ }
+
+ auto stats = std::make_unique<SwStatsCpu>();
+ if (!stats->isValid()) {
+ LOG(SoftwareIsp, Error) << "Failed to create SwStatsCpu object";
+ return;
+ }
+ stats->statsReady.connect(this, &SoftwareIsp::statsReady);
+
+ debayer_ = std::make_unique<DebayerCpu>(std::move(stats));
+ debayer_->inputBufferReady.connect(this, &SoftwareIsp::inputReady);
+ debayer_->outputBufferReady.connect(this, &SoftwareIsp::outputReady);
+
+ ipa_ = IPAManager::createIPA<ipa::soft::IPAProxySoft>(pipe, 0, 0);
+ if (!ipa_) {
+ LOG(SoftwareIsp, Error)
+ << "Creating IPA for software ISP failed";
+ debayer_.reset();
+ return;
+ }
+
+ /*
+ * The API tuning file is made from the sensor name. If the tuning file
+ * isn't found, fall back to the 'uncalibrated' file.
+ */
+ std::string ipaTuningFile = ipa_->configurationFile(sensor->model() + ".yaml");
+ if (ipaTuningFile.empty())
+ ipaTuningFile = ipa_->configurationFile("uncalibrated.yaml");
+
+ int ret = ipa_->init(IPASettings{ ipaTuningFile, sensor->model() },
+ debayer_->getStatsFD(),
+ sharedParams_.fd(),
+ sensor->controls());
+ if (ret) {
+ LOG(SoftwareIsp, Error) << "IPA init failed";
+ debayer_.reset();
+ return;
+ }
+
+ ipa_->setIspParams.connect(this, &SoftwareIsp::saveIspParams);
+ ipa_->setSensorControls.connect(this, &SoftwareIsp::setSensorCtrls);
+
+ debayer_->moveToThread(&ispWorkerThread_);
+}
+
+SoftwareIsp::~SoftwareIsp()
+{
+ /* make sure to destroy the DebayerCpu before the ispWorkerThread_ is gone */
+ debayer_.reset();
+}
+
+/**
+ * \fn int SoftwareIsp::loadConfiguration([[maybe_unused]] const std::string &filename)
+ * \brief Load a configuration from a file
+ * \param[in] filename The file to load the configuration data from
+ *
+ * This is currently a stub that does nothing and always returns "success".
+ *
+ * \return 0 on success
+ */
+
+/**
+ * \brief Process the statistics gathered
+ * \param[in] sensorControls The sensor controls
+ *
+ * Requests the IPA to calculate new parameters for ISP and new control
+ * values for the sensor.
+ */
+void SoftwareIsp::processStats(const ControlList &sensorControls)
+{
+ ASSERT(ipa_);
+ ipa_->processStats(sensorControls);
+}
+
+/**
+ * \brief Check the validity of the SoftwareIsp object
+ * \return True if the SoftwareIsp object is valid, false otherwise
+ */
+bool SoftwareIsp::isValid() const
+{
+ return !!debayer_;
+}
+
+/**
+ * \brief Get the output formats supported for the given input format
+ * \param[in] inputFormat The input format
+ * \return All the supported output formats or an empty vector if there are none
+ */
+std::vector<PixelFormat> SoftwareIsp::formats(PixelFormat inputFormat)
+{
+ ASSERT(debayer_);
+
+ return debayer_->formats(inputFormat);
+}
+
+/**
+ * \brief Get the supported output sizes for the given input format and size
+ * \param[in] inputFormat The input format
+ * \param[in] inputSize The input frame size
+ * \return The valid size range or an empty range if there are none
+ */
+SizeRange SoftwareIsp::sizes(PixelFormat inputFormat, const Size &inputSize)
+{
+ ASSERT(debayer_);
+
+ return debayer_->sizes(inputFormat, inputSize);
+}
+
+/**
+ * \brief Get the output stride and the frame size in bytes for the given output format and size
+ * \param[in] outputFormat The output format
+ * \param[in] size The output size (width and height in pixels)
+ * \return A tuple of the stride and the frame size in bytes, or a tuple of 0,0
+ * if there is no valid output config
+ */
+std::tuple<unsigned int, unsigned int>
+SoftwareIsp::strideAndFrameSize(const PixelFormat &outputFormat, const Size &size)
+{
+ ASSERT(debayer_);
+
+ return debayer_->strideAndFrameSize(outputFormat, size);
+}
+
+/**
+ * \brief Configure the SoftwareIsp object according to the passed in parameters
+ * \param[in] inputCfg The input configuration
+ * \param[in] outputCfgs The output configurations
+ * \param[in] sensorControls ControlInfoMap of the controls supported by the sensor
+ * \return 0 on success, a negative errno on failure
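+ *
+ * A usage sketch, assuming \a isp, \a inputCfg and \a outputCfg have been
+ * prepared by the pipeline handler and \a sensor is the camera sensor:
+ * \code
+ * std::vector<std::reference_wrapper<StreamConfiguration>> outputCfgs;
+ * outputCfgs.push_back(std::ref(outputCfg));
+ *
+ * int ret = isp.configure(inputCfg, outputCfgs, sensor->controls());
+ * if (ret)
+ *     return ret;
+ * \endcode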
+ */
+int SoftwareIsp::configure(const StreamConfiguration &inputCfg,
+ const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs,
+ const ControlInfoMap &sensorControls)
+{
+ ASSERT(ipa_ && debayer_);
+
+ int ret = ipa_->configure(sensorControls);
+ if (ret < 0)
+ return ret;
+
+ return debayer_->configure(inputCfg, outputCfgs);
+}
+
+/**
+ * \brief Export the buffers from the Software ISP
+ * \param[in] output Output stream index exporting the buffers
+ * \param[in] count Number of buffers to allocate
+ * \param[out] buffers Vector to store the allocated buffers
+ * \return The number of allocated buffers on success or a negative error code
+ * otherwise
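+ *
+ * For example, allocating four buffers for the only output stream, where
+ * \a isp is an assumed SoftwareIsp instance and the buffer count is
+ * illustrative:
+ * \code
+ * std::vector<std::unique_ptr<FrameBuffer>> buffers;
+ * int ret = isp.exportBuffers(0, 4, &buffers);
+ * if (ret < 0)
+ *     return ret;
+ * \endcode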
+ */
+int SoftwareIsp::exportBuffers(unsigned int output, unsigned int count,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ ASSERT(debayer_ != nullptr);
+
+ /* Single output for now */
+ if (output >= 1)
+ return -EINVAL;
+
+ for (unsigned int i = 0; i < count; i++) {
+ const std::string name = "frame-" + std::to_string(i);
+ const size_t frameSize = debayer_->frameSize();
+
+ FrameBuffer::Plane outPlane;
+ outPlane.fd = SharedFD(dmaHeap_.alloc(name.c_str(), frameSize));
+ if (!outPlane.fd.isValid()) {
+ LOG(SoftwareIsp, Error)
+ << "failed to allocate a dma_buf";
+ return -ENOMEM;
+ }
+ outPlane.offset = 0;
+ outPlane.length = frameSize;
+
+ std::vector<FrameBuffer::Plane> planes{ outPlane };
+ buffers->emplace_back(std::make_unique<FrameBuffer>(std::move(planes)));
+ }
+
+ return count;
+}
+
+/**
+ * \brief Queue buffers to Software ISP
+ * \param[in] input The input framebuffer
+ * \param[in] outputs The container holding the output stream indexes and
+ * their respective frame buffer outputs
+ * \return 0 on success, a negative errno on failure
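+ *
+ * A usage sketch, with \a input and \a output standing in for previously
+ * allocated frame buffers and \a isp for a SoftwareIsp instance:
+ * \code
+ * std::map<unsigned int, FrameBuffer *> outputs{ { 0, output } };
+ * int ret = isp.queueBuffers(input, outputs);
+ * \endcode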
+ */
+int SoftwareIsp::queueBuffers(FrameBuffer *input,
+ const std::map<unsigned int, FrameBuffer *> &outputs)
+{
+ unsigned int mask = 0;
+
+ /*
+ * Validate the outputs as a sanity check: at least one output is
+ * required, all outputs must reference a valid stream and no two
+ * outputs can reference the same stream.
+ */
+ if (outputs.empty())
+ return -EINVAL;
+
+ for (auto [index, buffer] : outputs) {
+ if (!buffer)
+ return -EINVAL;
+ if (index >= 1) /* Only a single stream is supported for now */
+ return -EINVAL;
+ if (mask & (1 << index))
+ return -EINVAL;
+
+ mask |= 1 << index;
+ }
+
+ process(input, outputs.at(0));
+
+ return 0;
+}
+
+/**
+ * \brief Starts the Software ISP streaming operation
+ * \return 0 on success, any other value indicates an error
+ */
+int SoftwareIsp::start()
+{
+ int ret = ipa_->start();
+ if (ret)
+ return ret;
+
+ ispWorkerThread_.start();
+ return 0;
+}
+
+/**
+ * \brief Stops the Software ISP streaming operation
+ */
+void SoftwareIsp::stop()
+{
+ ispWorkerThread_.exit();
+ ispWorkerThread_.wait();
+
+ ipa_->stop();
+}
+
+/**
+ * \brief Passes the input framebuffer to the ISP worker to process
+ * \param[in] input The input framebuffer
+ * \param[out] output The framebuffer to write the processed frame to
+ */
+void SoftwareIsp::process(FrameBuffer *input, FrameBuffer *output)
+{
+ debayer_->invokeMethod(&DebayerCpu::process,
+ ConnectionTypeQueued, input, output, debayerParams_);
+}
+
+void SoftwareIsp::saveIspParams()
+{
+ debayerParams_ = *sharedParams_;
+}
+
+void SoftwareIsp::setSensorCtrls(const ControlList &sensorControls)
+{
+ setSensorControls.emit(sensorControls);
+}
+
+void SoftwareIsp::statsReady()
+{
+ ispStatsReady.emit();
+}
+
+void SoftwareIsp::inputReady(FrameBuffer *input)
+{
+ inputBufferReady.emit(input);
+}
+
+void SoftwareIsp::outputReady(FrameBuffer *output)
+{
+ outputBufferReady.emit(output);
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/software_isp/swstats_cpu.cpp b/src/libcamera/software_isp/swstats_cpu.cpp
new file mode 100644
index 00000000..815c4d4f
--- /dev/null
+++ b/src/libcamera/software_isp/swstats_cpu.cpp
@@ -0,0 +1,432 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ * Copyright (C) 2023, Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * CPU based software statistics implementation
+ */
+
+#include "swstats_cpu.h"
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/bayer_format.h"
+
+namespace libcamera {
+
+/**
+ * \class SwStatsCpu
+ * \brief Class for gathering statistics on the CPU
+ *
+ * CPU based software ISP statistics implementation.
+ *
+ * This class offers a configure function + functions to gather statistics on a
+ * line by line basis. This allows CPU based software debayering to interleave
+ * debayering and statistics gathering on a line by line basis while the input
+ * data is still hot in the cache.
+ *
+ * It is also possible to specify a window over which to gather statistics
+ * instead of processing the whole frame.
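+ *
+ * A sketch of the expected call sequence for a format whose pattern is two
+ * lines high; the frame height and the per-line source pointer array are
+ * hypothetical names:
+ * \code
+ * SwStatsCpu stats;
+ * stats.configure(inputCfg);
+ * stats.setWindow(Rectangle(inputCfg.size));
+ *
+ * stats.startFrame();
+ * for (unsigned int y = 0; y < height; y += stats.patternSize().height)
+ *     stats.processLine0(y, linePointers);
+ * stats.finishFrame();
+ * \endcode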
+ */
+
+/**
+ * \fn bool SwStatsCpu::isValid() const
+ * \brief Gets whether the statistics object is valid
+ *
+ * \return True if it's valid, false otherwise
+ */
+
+/**
+ * \fn const SharedFD &SwStatsCpu::getStatsFD()
+ * \brief Get the file descriptor for the statistics
+ *
+ * \return The file descriptor
+ */
+
+/**
+ * \fn const Size &SwStatsCpu::patternSize()
+ * \brief Get the pattern size
+ *
+ * For some input formats, e.g. Bayer data, processing is done multiple lines
+ * and/or columns at a time. This function returns the width and height at
+ * which the (Bayer) pattern repeats. Window values are rounded down to a
+ * multiple of this size, and the height also indicates whether processLine2()
+ * needs to be called.
+ * This may only be called after a successful configure() call.
+ *
+ * \return The pattern size
+ */
+
+/**
+ * \fn void SwStatsCpu::processLine0(unsigned int y, const uint8_t *src[])
+ * \brief Process line 0
+ * \param[in] y The y coordinate
+ * \param[in] src The input data
+ *
+ * This function processes line 0 for input formats with a patternSize height
+ * of 1. It processes lines 0 and 1 for input formats with a patternSize
+ * height of 2 or more.
+ * This function may only be called after a successful setWindow() call.
+ */
+
+/**
+ * \fn void SwStatsCpu::processLine2(unsigned int y, const uint8_t *src[])
+ * \brief Process lines 2 and 3
+ * \param[in] y The y coordinate
+ * \param[in] src The input data
+ *
+ * This function processes lines 2 and 3 for input formats with a patternSize
+ * height of 4.
+ * This function may only be called after a successful setWindow() call.
+ */
+
+/**
+ * \var Signal<> SwStatsCpu::statsReady
+ * \brief Signals that the statistics are ready
+ */
+
+/**
+ * \typedef SwStatsCpu::statsProcessFn
+ * \brief Called when there is data to get statistics from
+ * \param[in] src The input data
+ *
+ * These functions take an array of (patternSize_.height + 1) src
+ * pointers each pointing to a line in the source image. The middle
+ * element of the array will point to the actual line being processed.
+ * Earlier element(s) will point to the previous line(s) and later
+ * element(s) to the next line(s).
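+ *
+ * For example, for a pattern that is 2 lines high the array holds three
+ * pointers: src[0] points to the previous line, src[1] to the line being
+ * processed and src[2] to the following line.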
+ *
+ * See the documentation of DebayerCpu::debayerFn for more details.
+ */
+
+/**
+ * \var unsigned int SwStatsCpu::ySkipMask_
+ * \brief Skip lines where this bitmask is set in y
+ */
+
+/**
+ * \var Rectangle SwStatsCpu::window_
+ * \brief Statistics window, set by setWindow(), used every line
+ */
+
+/**
+ * \var Size SwStatsCpu::patternSize_
+ * \brief The size of the bayer pattern
+ *
+ * Valid sizes are: 2x2, 4x2 or 4x4.
+ */
+
+/**
+ * \var unsigned int SwStatsCpu::xShift_
+ * \brief The offset of x, applied to window_.x for bayer variants
+ *
+ * This can either be 0 or 1.
+ */
+
+LOG_DEFINE_CATEGORY(SwStatsCpu)
+
+SwStatsCpu::SwStatsCpu()
+ : sharedStats_("softIsp_stats")
+{
+ if (!sharedStats_)
+ LOG(SwStatsCpu, Error)
+ << "Failed to create shared memory for statistics";
+}
+
+static constexpr unsigned int kRedYMul = 77; /* 0.299 * 256 */
+static constexpr unsigned int kGreenYMul = 150; /* 0.587 * 256 */
+static constexpr unsigned int kBlueYMul = 29; /* 0.114 * 256 */
+
+#define SWSTATS_START_LINE_STATS(pixel_t) \
+ pixel_t r, g, g2, b; \
+ uint64_t yVal; \
+ \
+ uint64_t sumR = 0; \
+ uint64_t sumG = 0; \
+ uint64_t sumB = 0;
+
+#define SWSTATS_ACCUMULATE_LINE_STATS(div) \
+ sumR += r; \
+ sumG += g; \
+ sumB += b; \
+ \
+ yVal = r * kRedYMul; \
+ yVal += g * kGreenYMul; \
+ yVal += b * kBlueYMul; \
+ stats_.yHistogram[yVal * SwIspStats::kYHistogramSize / (256 * 256 * (div))]++;
+
+#define SWSTATS_FINISH_LINE_STATS() \
+ stats_.sumR_ += sumR; \
+ stats_.sumG_ += sumG; \
+ stats_.sumB_ += sumB;
+
+void SwStatsCpu::statsBGGR8Line0(const uint8_t *src[])
+{
+ const uint8_t *src0 = src[1] + window_.x;
+ const uint8_t *src1 = src[2] + window_.x;
+
+ SWSTATS_START_LINE_STATS(uint8_t)
+
+ if (swapLines_)
+ std::swap(src0, src1);
+
+ /* x += 4 to sample every other 2x2 block */
+ for (int x = 0; x < (int)window_.width; x += 4) {
+ b = src0[x];
+ g = src0[x + 1];
+ g2 = src1[x];
+ r = src1[x + 1];
+
+ g = (g + g2) / 2;
+
+ SWSTATS_ACCUMULATE_LINE_STATS(1)
+ }
+
+ SWSTATS_FINISH_LINE_STATS()
+}
+
+void SwStatsCpu::statsBGGR10Line0(const uint8_t *src[])
+{
+ const uint16_t *src0 = (const uint16_t *)src[1] + window_.x;
+ const uint16_t *src1 = (const uint16_t *)src[2] + window_.x;
+
+ SWSTATS_START_LINE_STATS(uint16_t)
+
+ if (swapLines_)
+ std::swap(src0, src1);
+
+ /* x += 4 to sample every other 2x2 block */
+ for (int x = 0; x < (int)window_.width; x += 4) {
+ b = src0[x];
+ g = src0[x + 1];
+ g2 = src1[x];
+ r = src1[x + 1];
+
+ g = (g + g2) / 2;
+
+ /* divide Y by 4 for 10 -> 8 bpp value */
+ SWSTATS_ACCUMULATE_LINE_STATS(4)
+ }
+
+ SWSTATS_FINISH_LINE_STATS()
+}
+
+void SwStatsCpu::statsBGGR12Line0(const uint8_t *src[])
+{
+ const uint16_t *src0 = (const uint16_t *)src[1] + window_.x;
+ const uint16_t *src1 = (const uint16_t *)src[2] + window_.x;
+
+ SWSTATS_START_LINE_STATS(uint16_t)
+
+ if (swapLines_)
+ std::swap(src0, src1);
+
+ /* x += 4 to sample every other 2x2 block */
+ for (int x = 0; x < (int)window_.width; x += 4) {
+ b = src0[x];
+ g = src0[x + 1];
+ g2 = src1[x];
+ r = src1[x + 1];
+
+ g = (g + g2) / 2;
+
+ /* divide Y by 16 for 12 -> 8 bpp value */
+ SWSTATS_ACCUMULATE_LINE_STATS(16)
+ }
+
+ SWSTATS_FINISH_LINE_STATS()
+}
+
+void SwStatsCpu::statsBGGR10PLine0(const uint8_t *src[])
+{
+ const uint8_t *src0 = src[1] + window_.x * 5 / 4;
+ const uint8_t *src1 = src[2] + window_.x * 5 / 4;
+ const int widthInBytes = window_.width * 5 / 4;
+
+ if (swapLines_)
+ std::swap(src0, src1);
+
+ SWSTATS_START_LINE_STATS(uint8_t)
+
+ /* x += 5 to sample every other 2x2 block */
+ for (int x = 0; x < widthInBytes; x += 5) {
+ /* BGGR */
+ b = src0[x];
+ g = src0[x + 1];
+ g2 = src1[x];
+ r = src1[x + 1];
+ g = (g + g2) / 2;
+ /* Data is already 8 bits, divide by 1 */
+ SWSTATS_ACCUMULATE_LINE_STATS(1)
+ }
+
+ SWSTATS_FINISH_LINE_STATS()
+}
+
+void SwStatsCpu::statsGBRG10PLine0(const uint8_t *src[])
+{
+ const uint8_t *src0 = src[1] + window_.x * 5 / 4;
+ const uint8_t *src1 = src[2] + window_.x * 5 / 4;
+ const int widthInBytes = window_.width * 5 / 4;
+
+ if (swapLines_)
+ std::swap(src0, src1);
+
+ SWSTATS_START_LINE_STATS(uint8_t)
+
+ /* x += 5 to sample every other 2x2 block */
+ for (int x = 0; x < widthInBytes; x += 5) {
+ /* GBRG */
+ g = src0[x];
+ b = src0[x + 1];
+ r = src1[x];
+ g2 = src1[x + 1];
+ g = (g + g2) / 2;
+ /* Data is already 8 bits, divide by 1 */
+ SWSTATS_ACCUMULATE_LINE_STATS(1)
+ }
+
+ SWSTATS_FINISH_LINE_STATS()
+}
+
+/**
+ * \brief Reset state to start statistics gathering for a new frame
+ *
+ * This may only be called after a successful setWindow() call.
+ */
+void SwStatsCpu::startFrame(void)
+{
+ if (window_.width == 0)
+ LOG(SwStatsCpu, Error) << "Calling startFrame() without setWindow()";
+
+ stats_.sumR_ = 0;
+ stats_.sumB_ = 0;
+ stats_.sumG_ = 0;
+ stats_.yHistogram.fill(0);
+}
+
+/**
+ * \brief Finish statistics calculation for the current frame
+ *
+ * This may only be called after a successful setWindow() call.
+ */
+void SwStatsCpu::finishFrame(void)
+{
+ *sharedStats_ = stats_;
+ statsReady.emit();
+}
+
+/**
+ * \brief Setup SwStatsCpu object for standard Bayer orders
+ * \param[in] order The Bayer order
+ *
+ * Check if \a order is a standard Bayer order and set up xShift_ and
+ * swapLines_ so that a single BGGR stats function can be used for all 4
+ * standard orders.
+ *
+ * \return 0 on success, -EINVAL if \a order is not a standard Bayer order
+ */
+int SwStatsCpu::setupStandardBayerOrder(BayerFormat::Order order)
+{
+ switch (order) {
+ case BayerFormat::BGGR:
+ xShift_ = 0;
+ swapLines_ = false;
+ break;
+ case BayerFormat::GBRG:
+ xShift_ = 1; /* BGGR -> GBRG */
+ swapLines_ = false;
+ break;
+ case BayerFormat::GRBG:
+ xShift_ = 0;
+ swapLines_ = true; /* BGGR -> GRBG */
+ break;
+ case BayerFormat::RGGB:
+ xShift_ = 1; /* BGGR -> GBRG */
+ swapLines_ = true; /* GBRG -> RGGB */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ patternSize_.height = 2;
+ patternSize_.width = 2;
+ ySkipMask_ = 0x02; /* Skip every 3rd and 4th line */
+ return 0;
+}
+
+/**
+ * \brief Configure the statistics object for the passed-in input configuration
+ * \param[in] inputCfg The input stream configuration
+ *
+ * \return 0 on success, a negative errno value on failure
+ */
+int SwStatsCpu::configure(const StreamConfiguration &inputCfg)
+{
+ BayerFormat bayerFormat =
+ BayerFormat::fromPixelFormat(inputCfg.pixelFormat);
+
+ if (bayerFormat.packing == BayerFormat::Packing::None &&
+ setupStandardBayerOrder(bayerFormat.order) == 0) {
+ switch (bayerFormat.bitDepth) {
+ case 8:
+ stats0_ = &SwStatsCpu::statsBGGR8Line0;
+ return 0;
+ case 10:
+ stats0_ = &SwStatsCpu::statsBGGR10Line0;
+ return 0;
+ case 12:
+ stats0_ = &SwStatsCpu::statsBGGR12Line0;
+ return 0;
+ }
+ }
+
+ if (bayerFormat.bitDepth == 10 &&
+ bayerFormat.packing == BayerFormat::Packing::CSI2) {
+ patternSize_.height = 2;
+ patternSize_.width = 4; /* 5 bytes per *4* pixels */
+ /* Skip every 3rd and 4th line, sample every other 2x2 block */
+ ySkipMask_ = 0x02;
+ xShift_ = 0;
+
+ switch (bayerFormat.order) {
+ case BayerFormat::BGGR:
+ case BayerFormat::GRBG:
+ stats0_ = &SwStatsCpu::statsBGGR10PLine0;
+ swapLines_ = bayerFormat.order == BayerFormat::GRBG;
+ return 0;
+ case BayerFormat::GBRG:
+ case BayerFormat::RGGB:
+ stats0_ = &SwStatsCpu::statsGBRG10PLine0;
+ swapLines_ = bayerFormat.order == BayerFormat::RGGB;
+ return 0;
+ default:
+ break;
+ }
+ }
+
+ LOG(SwStatsCpu, Info)
+ << "Unsupported input format " << inputCfg.pixelFormat.toString();
+ return -EINVAL;
+}
+
+/**
+ * \brief Specify window coordinates over which to gather statistics
+ * \param[in] window The window object.
+ */
+void SwStatsCpu::setWindow(const Rectangle &window)
+{
+ window_ = window;
+
+ window_.x &= ~(patternSize_.width - 1);
+ window_.x += xShift_;
+ window_.y &= ~(patternSize_.height - 1);
+
+ /* Subtract xShift_ from the width to make sure the shifted window fits */
+ window_.width -= xShift_;
+ window_.width &= ~(patternSize_.width - 1);
+ window_.height &= ~(patternSize_.height - 1);
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/software_isp/swstats_cpu.h b/src/libcamera/software_isp/swstats_cpu.h
new file mode 100644
index 00000000..363e326f
--- /dev/null
+++ b/src/libcamera/software_isp/swstats_cpu.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ * Copyright (C) 2023, Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * CPU based software statistics implementation
+ */
+
+#pragma once
+
+#include <stdint.h>
+
+#include <libcamera/base/signal.h>
+
+#include <libcamera/geometry.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/shared_mem_object.h"
+#include "libcamera/internal/software_isp/swisp_stats.h"
+
+namespace libcamera {
+
+class PixelFormat;
+struct StreamConfiguration;
+
+class SwStatsCpu
+{
+public:
+ SwStatsCpu();
+ ~SwStatsCpu() = default;
+
+ bool isValid() const { return sharedStats_.fd().isValid(); }
+
+ const SharedFD &getStatsFD() { return sharedStats_.fd(); }
+
+ const Size &patternSize() { return patternSize_; }
+
+ int configure(const StreamConfiguration &inputCfg);
+ void setWindow(const Rectangle &window);
+ void startFrame();
+ void finishFrame();
+
+ void processLine0(unsigned int y, const uint8_t *src[])
+ {
+ if ((y & ySkipMask_) || y < static_cast<unsigned int>(window_.y) ||
+ y >= (window_.y + window_.height))
+ return;
+
+ (this->*stats0_)(src);
+ }
+
+ void processLine2(unsigned int y, const uint8_t *src[])
+ {
+ if ((y & ySkipMask_) || y < static_cast<unsigned int>(window_.y) ||
+ y >= (window_.y + window_.height))
+ return;
+
+ (this->*stats2_)(src);
+ }
+
+ Signal<> statsReady;
+
+private:
+ using statsProcessFn = void (SwStatsCpu::*)(const uint8_t *src[]);
+
+ int setupStandardBayerOrder(BayerFormat::Order order);
+ /* Bayer 8 bpp unpacked */
+ void statsBGGR8Line0(const uint8_t *src[]);
+ /* Bayer 10 bpp unpacked */
+ void statsBGGR10Line0(const uint8_t *src[]);
+ /* Bayer 12 bpp unpacked */
+ void statsBGGR12Line0(const uint8_t *src[]);
+ /* Bayer 10 bpp packed */
+ void statsBGGR10PLine0(const uint8_t *src[]);
+ void statsGBRG10PLine0(const uint8_t *src[]);
+
+ /* Variables set by configure(), used every line */
+ statsProcessFn stats0_;
+ statsProcessFn stats2_;
+ bool swapLines_;
+
+ unsigned int ySkipMask_;
+
+ Rectangle window_;
+
+ Size patternSize_;
+
+ unsigned int xShift_;
+
+ SharedMemObject<SwIspStats> sharedStats_;
+ SwIspStats stats_;
+};
+
+} /* namespace libcamera */
diff --git a/src/libcamera/source_paths.cpp b/src/libcamera/source_paths.cpp
index 19689585..1af5386a 100644
--- a/src/libcamera/source_paths.cpp
+++ b/src/libcamera/source_paths.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * source_paths.cpp - Identify libcamera source and build paths
+ * Identify libcamera source and build paths
*/
#include "libcamera/internal/source_paths.h"
diff --git a/src/libcamera/stream.cpp b/src/libcamera/stream.cpp
index 686e693b..053cc4b8 100644
--- a/src/libcamera/stream.cpp
+++ b/src/libcamera/stream.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * stream.cpp - Video stream for a Camera
+ * Video stream for a Camera
*/
#include <libcamera/stream.h>
@@ -311,7 +311,8 @@ StreamConfiguration::StreamConfiguration(const StreamFormats &formats)
* The stride value reports the number of bytes between the beginning of
* successive lines in an image buffer for this stream. The value is
* valid after successfully validating the configuration with a call to
- * CameraConfiguration::validate().
+ * CameraConfiguration::validate(). For compressed formats (such as MJPEG),
+ * this value will be zero.
*/
/**
@@ -418,9 +419,23 @@ std::string StreamConfiguration::toString() const
*/
/**
- * \typedef StreamRoles
- * \brief A vector of StreamRole
+ * \brief Insert a text representation of a StreamRole into an output stream
+ * \param[in] out The output stream
+ * \param[in] role The StreamRole
+ * \return The output stream \a out
*/
+std::ostream &operator<<(std::ostream &out, StreamRole role)
+{
+ static constexpr std::array<const char *, 4> names{
+ "Raw",
+ "StillCapture",
+ "VideoRecording",
+ "Viewfinder",
+ };
+
+ out << names[utils::to_underlying(role)];
+ return out;
+}
/**
* \class Stream
diff --git a/src/libcamera/sysfs.cpp b/src/libcamera/sysfs.cpp
index 44c3331b..3d9885b0 100644
--- a/src/libcamera/sysfs.cpp
+++ b/src/libcamera/sysfs.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * sysfs.cpp - Miscellaneous utility functions to access sysfs
+ * Miscellaneous utility functions to access sysfs
*/
#include "libcamera/internal/sysfs.h"
diff --git a/src/libcamera/tracepoints.cpp b/src/libcamera/tracepoints.cpp
index 0173b75a..90662d12 100644
--- a/src/libcamera/tracepoints.cpp
+++ b/src/libcamera/tracepoints.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * tracepoints.cpp - Tracepoints with lttng
+ * Tracepoints with lttng
*/
#define TRACEPOINT_CREATE_PROBES
#define TRACEPOINT_DEFINE
diff --git a/src/libcamera/transform.cpp b/src/libcamera/transform.cpp
index 99a043ba..9fe8b562 100644
--- a/src/libcamera/transform.cpp
+++ b/src/libcamera/transform.cpp
@@ -1,12 +1,14 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2020, Raspberry Pi Ltd
*
- * transform.cpp - 2D plane transforms.
+ * 2D plane transforms.
*/
#include <libcamera/transform.h>
+#include <libcamera/orientation.h>
+
/**
* \file transform.h
* \brief Enum to represent and manipulate 2D plane transforms
@@ -187,24 +189,24 @@ Input image | | goes to output image | |
*/
/**
- * \brief Compose two transforms together
- * \param[in] t1 The second transform
- * \param[in] t0 The first transform
+ * \brief Compose two transforms by applying \a t0 first then \a t1
+ * \param[in] t0 The first transform to apply
+ * \param[in] t1 The second transform to apply
*
- * Composing transforms follows the usual mathematical convention for
- * composing functions. That is, when performing `t1 * t0`, \a t0 is applied
- * first, and then \a t1.
- * For example, `Transpose * HFlip` performs `HFlip` first and then the
- * `Transpose` yielding `Rot270`, as shown below.
+ * Compose two transforms into a transform that is equivalent to first applying
+ * \a t0 and then applying \a t1. For example, `HFlip * Transpose` performs
+ * `HFlip` first and then the `Transpose` yielding `Rot270`, as shown below.
~~~
A-B B-A B-D
Input image | | -> HFLip -> | | -> Transpose -> | | = Rot270
C-D D-C A-C
~~~
- * Note that composition is generally non-commutative for Transforms,
- * and not the same as XOR-ing the underlying bit representations.
+ * Note that composition is generally non-commutative for Transforms, and not
+ * the same as XOR-ing the underlying bit representations.
+ *
+ * \return A Transform equivalent to applying \a t0 and then \a t1
*/
-Transform operator*(Transform t1, Transform t0)
+Transform operator*(Transform t0, Transform t1)
{
/*
* Reorder the operations so that we imagine doing t0's transpose
@@ -299,6 +301,91 @@ Transform transformFromRotation(int angle, bool *success)
return Transform::Identity;
}
+namespace {
+
+/**
+ * \brief Return the transform representing \a orientation
+ * \param[in] orientation The orientation to convert
+ * \return The transform corresponding to \a orientation
+ */
+Transform transformFromOrientation(const Orientation &orientation)
+{
+ switch (orientation) {
+ case Orientation::Rotate0:
+ return Transform::Identity;
+ case Orientation::Rotate0Mirror:
+ return Transform::HFlip;
+ case Orientation::Rotate180:
+ return Transform::Rot180;
+ case Orientation::Rotate180Mirror:
+ return Transform::VFlip;
+ case Orientation::Rotate90Mirror:
+ return Transform::Transpose;
+ case Orientation::Rotate90:
+ return Transform::Rot90;
+ case Orientation::Rotate270Mirror:
+ return Transform::Rot180Transpose;
+ case Orientation::Rotate270:
+ return Transform::Rot270;
+ }
+
+ return Transform::Identity;
+}
+
+} /* namespace */
+
+/**
+ * \brief Return the Transform that applied to \a o2 gives \a o1
+ * \param[in] o1 The Orientation to obtain
+ * \param[in] o2 The base Orientation
+ *
+ * This operation can be used to easily compute the Transform to apply to a
+ * base orientation \a o2 to get the desired orientation \a o1.
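+ *
+ * For example, requesting Rotate90 starting from a Rotate0 base is expected
+ * to yield Rot90 (a sketch of the result, not an exhaustive description):
+ * \code
+ * Transform t = Orientation::Rotate90 / Orientation::Rotate0;
+ * // t == Transform::Rot90
+ * \endcode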
+ *
+ * \return A Transform that applied to \a o2 gives \a o1
+ */
+Transform operator/(const Orientation &o1, const Orientation &o2)
+{
+ Transform t1 = transformFromOrientation(o1);
+ Transform t2 = transformFromOrientation(o2);
+
+ return -t2 * t1;
+}
+
+/**
+ * \brief Apply the Transform \a t on the orientation \a o
+ * \param[in] o The orientation
+ * \param[in] t The transform to apply on \a o
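+ *
+ * A small sketch of the expected behaviour:
+ * \code
+ * Orientation o = Orientation::Rotate0 * Transform::HFlip;
+ * // o == Orientation::Rotate0Mirror
+ * \endcode
+ *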
+ * \return The Orientation resulting from applying \a t on \a o
+ */
+Orientation operator*(const Orientation &o, const Transform &t)
+{
+ /*
+ * Apply a Transform corresponding to the orientation first and
+ * then apply \a t to it.
+ */
+ switch (transformFromOrientation(o) * t) {
+ case Transform::Identity:
+ return Orientation::Rotate0;
+ case Transform::HFlip:
+ return Orientation::Rotate0Mirror;
+ case Transform::VFlip:
+ return Orientation::Rotate180Mirror;
+ case Transform::Rot180:
+ return Orientation::Rotate180;
+ case Transform::Transpose:
+ return Orientation::Rotate90Mirror;
+ case Transform::Rot270:
+ return Orientation::Rotate270;
+ case Transform::Rot90:
+ return Orientation::Rotate90;
+ case Transform::Rot180Transpose:
+ return Orientation::Rotate270Mirror;
+ }
+
+ return Orientation::Rotate0;
+}
+
/**
* \brief Return a character string describing the transform
* \param[in] t The transform to be described.
diff --git a/src/libcamera/v4l2_device.cpp b/src/libcamera/v4l2_device.cpp
index 3fc8438f..4a2048cf 100644
--- a/src/libcamera/v4l2_device.cpp
+++ b/src/libcamera/v4l2_device.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_device.cpp - Common base for V4L2 video devices and subdevices
+ * Common base for V4L2 video devices and subdevices
*/
#include "libcamera/internal/v4l2_device.h"
@@ -24,6 +24,7 @@
#include <libcamera/base/log.h>
#include <libcamera/base/utils.h>
+#include "libcamera/internal/formats.h"
#include "libcamera/internal/sysfs.h"
/**
@@ -85,18 +86,18 @@ int V4L2Device::open(unsigned int flags)
return -EBUSY;
}
- UniqueFD fd(syscall(SYS_openat, AT_FDCWD, deviceNode_.c_str(), flags));
+ UniqueFD fd(syscall(SYS_openat, AT_FDCWD, deviceNode_.c_str(),
+ flags | O_CLOEXEC));
if (!fd.isValid()) {
int ret = -errno;
- LOG(V4L2, Error) << "Failed to open V4L2 device: "
+ LOG(V4L2, Error) << "Failed to open V4L2 device '"
+ << deviceNode_ << "': "
<< strerror(-ret);
return ret;
}
setFd(std::move(fd));
- listControls();
-
return 0;
}
@@ -127,6 +128,8 @@ int V4L2Device::setFd(UniqueFD fd)
fdEventNotifier_->activated.connect(this, &V4L2Device::eventAvailable);
fdEventNotifier_->setEnabled(false);
+ listControls();
+
return 0;
}
@@ -242,7 +245,8 @@ ControlList V4L2Device::getControls(const std::vector<uint32_t> &ids)
}
/* A specific control failed. */
- LOG(V4L2, Error) << "Unable to read control " << errorIdx
+ const unsigned int id = v4l2Ctrls[errorIdx].id;
+ LOG(V4L2, Error) << "Unable to read control " << utils::hex(id)
<< ": " << strerror(-ret);
v4l2Ctrls.resize(errorIdx);
@@ -352,7 +356,8 @@ int V4L2Device::setControls(ControlList *ctrls)
}
/* A specific control failed. */
- LOG(V4L2, Error) << "Unable to set control " << errorIdx
+ const unsigned int id = v4l2Ctrls[errorIdx].id;
+ LOG(V4L2, Error) << "Unable to set control " << utils::hex(id)
<< ": " << strerror(-ret);
v4l2Ctrls.resize(errorIdx);
@@ -525,7 +530,7 @@ std::unique_ptr<ControlId> V4L2Device::v4l2ControlId(const v4l2_query_ext_ctrl &
* \param[in] ctrl The v4l2_query_ext_ctrl that represents a V4L2 control
* \return A ControlInfo that represents \a ctrl
*/
-ControlInfo V4L2Device::v4l2ControlInfo(const v4l2_query_ext_ctrl &ctrl)
+std::optional<ControlInfo> V4L2Device::v4l2ControlInfo(const v4l2_query_ext_ctrl &ctrl)
{
switch (ctrl.type) {
case V4L2_CTRL_TYPE_U8:
@@ -562,14 +567,14 @@ ControlInfo V4L2Device::v4l2ControlInfo(const v4l2_query_ext_ctrl &ctrl)
*
* \return A ControlInfo that represents \a ctrl
*/
-ControlInfo V4L2Device::v4l2MenuControlInfo(const struct v4l2_query_ext_ctrl &ctrl)
+std::optional<ControlInfo> V4L2Device::v4l2MenuControlInfo(const struct v4l2_query_ext_ctrl &ctrl)
{
std::vector<ControlValue> indices;
struct v4l2_querymenu menu = {};
menu.id = ctrl.id;
if (ctrl.minimum < 0)
- return ControlInfo();
+ return std::nullopt;
for (int32_t index = ctrl.minimum; index <= ctrl.maximum; ++index) {
menu.index = index;
@@ -579,6 +584,14 @@ ControlInfo V4L2Device::v4l2MenuControlInfo(const struct v4l2_query_ext_ctrl &ct
indices.push_back(index);
}
+ /*
+ * Some faulty UVC devices are known to return an empty menu control.
+ * Controls without a menu option cannot be set or read, so they are
+ * not exposed.
+ */
+ if (indices.size() == 0)
+ return std::nullopt;
+
return ControlInfo(indices,
ControlValue(static_cast<int32_t>(ctrl.default_value)));
}
@@ -627,7 +640,17 @@ void V4L2Device::listControls()
controlIdMap_[ctrl.id] = controlIds_.back().get();
controlInfo_.emplace(ctrl.id, ctrl);
- ctrls.emplace(controlIds_.back().get(), v4l2ControlInfo(ctrl));
+ std::optional<ControlInfo> info = v4l2ControlInfo(ctrl);
+
+ if (!info) {
+ LOG(V4L2, Error)
+ << "Control " << ctrl.name
+ << " cannot be registered";
+
+ continue;
+ }
+
+ ctrls.emplace(controlIds_.back().get(), *info);
}
controls_ = ControlInfoMap(std::move(ctrls), controlIdMap_);
@@ -666,7 +689,7 @@ void V4L2Device::updateControlInfo()
continue;
}
- info = v4l2ControlInfo(ctrl);
+ info = *v4l2ControlInfo(ctrl);
}
}
@@ -745,8 +768,12 @@ void V4L2Device::eventAvailable()
static const std::map<uint32_t, ColorSpace> v4l2ToColorSpace = {
{ V4L2_COLORSPACE_RAW, ColorSpace::Raw },
- { V4L2_COLORSPACE_JPEG, ColorSpace::Jpeg },
- { V4L2_COLORSPACE_SRGB, ColorSpace::Srgb },
+ { V4L2_COLORSPACE_SRGB, {
+ ColorSpace::Primaries::Rec709,
+ ColorSpace::TransferFunction::Srgb,
+ ColorSpace::YcbcrEncoding::Rec601,
+ ColorSpace::Range::Limited } },
+ { V4L2_COLORSPACE_JPEG, ColorSpace::Sycc },
{ V4L2_COLORSPACE_SMPTE170M, ColorSpace::Smpte170m },
{ V4L2_COLORSPACE_REC709, ColorSpace::Rec709 },
{ V4L2_COLORSPACE_BT2020, ColorSpace::Rec2020 },
@@ -771,8 +798,7 @@ static const std::map<uint32_t, ColorSpace::Range> v4l2ToRange = {
static const std::vector<std::pair<ColorSpace, v4l2_colorspace>> colorSpaceToV4l2 = {
{ ColorSpace::Raw, V4L2_COLORSPACE_RAW },
- { ColorSpace::Jpeg, V4L2_COLORSPACE_JPEG },
- { ColorSpace::Srgb, V4L2_COLORSPACE_SRGB },
+ { ColorSpace::Sycc, V4L2_COLORSPACE_JPEG },
{ ColorSpace::Smpte170m, V4L2_COLORSPACE_SMPTE170M },
{ ColorSpace::Rec709, V4L2_COLORSPACE_REC709 },
{ ColorSpace::Rec2020, V4L2_COLORSPACE_BT2020 },
@@ -792,6 +818,8 @@ static const std::map<ColorSpace::TransferFunction, v4l2_xfer_func> transferFunc
};
static const std::map<ColorSpace::YcbcrEncoding, v4l2_ycbcr_encoding> ycbcrEncodingToV4l2 = {
+ /* V4L2 has no "none" encoding. */
+ { ColorSpace::YcbcrEncoding::None, V4L2_YCBCR_ENC_DEFAULT },
{ ColorSpace::YcbcrEncoding::Rec601, V4L2_YCBCR_ENC_601 },
{ ColorSpace::YcbcrEncoding::Rec709, V4L2_YCBCR_ENC_709 },
{ ColorSpace::YcbcrEncoding::Rec2020, V4L2_YCBCR_ENC_BT2020 },
@@ -805,6 +833,7 @@ static const std::map<ColorSpace::Range, v4l2_quantization> rangeToV4l2 = {
/**
* \brief Convert the color space fields in a V4L2 format to a ColorSpace
* \param[in] v4l2Format A V4L2 format containing color space information
+ * \param[in] colourEncoding Type of colour encoding
*
* The colorspace, ycbcr_enc, xfer_func and quantization fields within a
* V4L2 format structure are converted to a corresponding ColorSpace.
@@ -816,7 +845,8 @@ static const std::map<ColorSpace::Range, v4l2_quantization> rangeToV4l2 = {
* \retval std::nullopt One or more V4L2 color space fields were not recognised
*/
template<typename T>
-std::optional<ColorSpace> V4L2Device::toColorSpace(const T &v4l2Format)
+std::optional<ColorSpace> V4L2Device::toColorSpace(const T &v4l2Format,
+ PixelFormatInfo::ColourEncoding colourEncoding)
{
auto itColor = v4l2ToColorSpace.find(v4l2Format.colorspace);
if (itColor == v4l2ToColorSpace.end())
@@ -839,6 +869,14 @@ std::optional<ColorSpace> V4L2Device::toColorSpace(const T &v4l2Format)
return std::nullopt;
colorSpace.ycbcrEncoding = itYcbcrEncoding->second;
+
+ /*
+ * V4L2 has no "none" encoding. Override the value returned by
+ * the kernel for non-YUV formats as YCbCr encoding isn't
+ * applicable in that case.
+ */
+ if (colourEncoding != PixelFormatInfo::ColourEncodingYUV)
+ colorSpace.ycbcrEncoding = ColorSpace::YcbcrEncoding::None;
}
if (v4l2Format.quantization != V4L2_QUANTIZATION_DEFAULT) {
@@ -847,14 +885,24 @@ std::optional<ColorSpace> V4L2Device::toColorSpace(const T &v4l2Format)
return std::nullopt;
colorSpace.range = itRange->second;
+
+ /*
+ * "Limited" quantization range is only meant for YUV formats.
+ * Override the range to "Full" for all other formats.
+ */
+ if (colourEncoding != PixelFormatInfo::ColourEncodingYUV)
+ colorSpace.range = ColorSpace::Range::Full;
}
return colorSpace;
}
-template std::optional<ColorSpace> V4L2Device::toColorSpace(const struct v4l2_pix_format &);
-template std::optional<ColorSpace> V4L2Device::toColorSpace(const struct v4l2_pix_format_mplane &);
-template std::optional<ColorSpace> V4L2Device::toColorSpace(const struct v4l2_mbus_framefmt &);
+template std::optional<ColorSpace> V4L2Device::toColorSpace(const struct v4l2_pix_format &,
+ PixelFormatInfo::ColourEncoding);
+template std::optional<ColorSpace> V4L2Device::toColorSpace(const struct v4l2_pix_format_mplane &,
+ PixelFormatInfo::ColourEncoding);
+template std::optional<ColorSpace> V4L2Device::toColorSpace(const struct v4l2_mbus_framefmt &,
+ PixelFormatInfo::ColourEncoding);
/**
* \brief Fill in the color space fields of a V4L2 format from a ColorSpace
diff --git a/src/libcamera/v4l2_pixelformat.cpp b/src/libcamera/v4l2_pixelformat.cpp
index 58fc4e9d..70568335 100644
--- a/src/libcamera/v4l2_pixelformat.cpp
+++ b/src/libcamera/v4l2_pixelformat.cpp
@@ -1,9 +1,9 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
- * Copyright (C) 2020, Raspberry Pi (Trading) Ltd.
+ * Copyright (C) 2020, Raspberry Pi Ltd
*
- * v4l2_pixelformat.cpp - V4L2 Pixel Format
+ * V4L2 Pixel Format
*/
#include "libcamera/internal/v4l2_pixelformat.h"
@@ -71,6 +71,10 @@ const std::map<V4L2PixelFormat, V4L2PixelFormat::Info> vpf2pf{
{ formats::BGRA8888, "32-bit ARGB 8-8-8-8" } },
{ V4L2PixelFormat(V4L2_PIX_FMT_BGRA32),
{ formats::RGBA8888, "32-bit ABGR 8-8-8-8" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_RGB48),
+ { formats::BGR161616, "48-bit RGB 16-16-16" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_BGR48),
+ { formats::RGB161616, "48-bit BGR 16-16-16" } },
/* YUV packed formats. */
{ V4L2PixelFormat(V4L2_PIX_FMT_YUYV),
@@ -81,6 +85,10 @@ const std::map<V4L2PixelFormat, V4L2PixelFormat::Info> vpf2pf{
{ formats::UYVY, "UYVY 4:2:2" } },
{ V4L2PixelFormat(V4L2_PIX_FMT_VYUY),
{ formats::VYUY, "VYUY 4:2:2" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_YUVA32),
+ { formats::AVUY8888, "32-bit YUVA 8-8-8-8" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_YUVX32),
+ { formats::XVUY8888, "32-bit YUVX 8-8-8-8" } },
/* YUV planar formats. */
{ V4L2PixelFormat(V4L2_PIX_FMT_NV16),
@@ -119,7 +127,7 @@ const std::map<V4L2PixelFormat, V4L2PixelFormat::Info> vpf2pf{
{ formats::YVU422, "Planar YVU 4:2:2 (N-C)" } },
{ V4L2PixelFormat(V4L2_PIX_FMT_YUV444M),
{ formats::YUV444, "Planar YUV 4:4:4 (N-C)" } },
- { V4L2PixelFormat(V4L2_PIX_FMT_YUV444M),
+ { V4L2PixelFormat(V4L2_PIX_FMT_YVU444M),
{ formats::YVU444, "Planar YVU 4:4:4 (N-C)" } },
/* Greyscale formats. */
@@ -127,8 +135,12 @@ const std::map<V4L2PixelFormat, V4L2PixelFormat::Info> vpf2pf{
{ formats::R8, "8-bit Greyscale" } },
{ V4L2PixelFormat(V4L2_PIX_FMT_Y10),
{ formats::R10, "10-bit Greyscale" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_Y10P),
+ { formats::R10_CSI2P, "10-bit Greyscale Packed" } },
{ V4L2PixelFormat(V4L2_PIX_FMT_Y12),
{ formats::R12, "12-bit Greyscale" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_Y16),
+ { formats::R16, "16-bit Greyscale" } },
/* Bayer formats. */
{ V4L2PixelFormat(V4L2_PIX_FMT_SBGGR8),
@@ -171,6 +183,22 @@ const std::map<V4L2PixelFormat, V4L2PixelFormat::Info> vpf2pf{
{ formats::SGRBG12_CSI2P, "12-bit Bayer GRGR/BGBG Packed" } },
{ V4L2PixelFormat(V4L2_PIX_FMT_SRGGB12P),
{ formats::SRGGB12_CSI2P, "12-bit Bayer RGRG/GBGB Packed" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR14),
+ { formats::SBGGR14, "14-bit Bayer BGBG/GRGR" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG14),
+ { formats::SGBRG14, "14-bit Bayer GBGB/RGRG" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG14),
+ { formats::SGRBG14, "14-bit Bayer GRGR/BGBG" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB14),
+ { formats::SRGGB14, "14-bit Bayer RGRG/GBGB" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR14P),
+ { formats::SBGGR14_CSI2P, "14-bit Bayer BGBG/GRGR Packed" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG14P),
+ { formats::SGBRG14_CSI2P, "14-bit Bayer GBGB/RGRG Packed" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG14P),
+ { formats::SGRBG14_CSI2P, "14-bit Bayer GRGR/BGBG Packed" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB14P),
+ { formats::SRGGB14_CSI2P, "14-bit Bayer RGRG/GBGB Packed" } },
{ V4L2PixelFormat(V4L2_PIX_FMT_SBGGR16),
{ formats::SBGGR16, "16-bit Bayer BGBG/GRGR" } },
{ V4L2PixelFormat(V4L2_PIX_FMT_SGBRG16),
@@ -179,10 +207,22 @@ const std::map<V4L2PixelFormat, V4L2PixelFormat::Info> vpf2pf{
{ formats::SGRBG16, "16-bit Bayer GRGR/BGBG" } },
{ V4L2PixelFormat(V4L2_PIX_FMT_SRGGB16),
{ formats::SRGGB16, "16-bit Bayer RGRG/GBGB" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_BGGR),
+ { formats::BGGR_PISP_COMP1, "16-bit Bayer BGBG/GRGR PiSP Compress Mode 1" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_GBRG),
+ { formats::GBRG_PISP_COMP1, "16-bit Bayer GBGB/RGRG PiSP Compress Mode 1" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_GRBG),
+ { formats::GRBG_PISP_COMP1, "16-bit Bayer GRGR/BGBG PiSP Compress Mode 1" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_RGGB),
+ { formats::RGGB_PISP_COMP1, "16-bit Bayer RGRG/GBGB PiSP Compress Mode 1" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_MONO),
+ { formats::MONO_PISP_COMP1, "16-bit Mono PiSP Compress Mode 1" } },
/* Compressed formats. */
{ V4L2PixelFormat(V4L2_PIX_FMT_MJPEG),
{ formats::MJPEG, "Motion-JPEG" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_JPEG),
+ { formats::MJPEG, "JPEG JFIF" } },
};
} /* namespace */
@@ -286,15 +326,23 @@ const char *V4L2PixelFormat::description() const
/**
* \brief Convert the V4L2 pixel format to the corresponding PixelFormat
+ * \param[in] warn When true, log a warning message if the V4L2 pixel format
+ * isn't known
+ *
+ * Users of this function might try to convert a V4L2PixelFormat to a
+ * PixelFormat just to check if the format is supported or not. In that case,
+ * they can suppress the warning message by setting the \a warn argument to
+ * false to not pollute the log with unnecessary messages.
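+ *
+ * For instance, a caller probing format support may do (sketch only,
+ * \a v4l2Format being a hypothetical V4L2PixelFormat value):
+ * \code
+ * const PixelFormat pixelFormat = v4l2Format.toPixelFormat(false);
+ * bool supported = pixelFormat.isValid();
+ * \endcode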
+ *
* \return The PixelFormat corresponding to the V4L2 pixel format
*/
-PixelFormat V4L2PixelFormat::toPixelFormat() const
+PixelFormat V4L2PixelFormat::toPixelFormat(bool warn) const
{
const auto iter = vpf2pf.find(*this);
if (iter == vpf2pf.end()) {
- LOG(V4L2, Warning)
- << "Unsupported V4L2 pixel format "
- << toString();
+ if (warn)
+ LOG(V4L2, Warning) << "Unsupported V4L2 pixel format "
+ << toString();
return PixelFormat();
}
@@ -302,26 +350,24 @@ PixelFormat V4L2PixelFormat::toPixelFormat() const
}
/**
- * \brief Convert \a pixelFormat to its corresponding V4L2PixelFormat
+ * \brief Retrieve the list of V4L2PixelFormat associated with \a pixelFormat
* \param[in] pixelFormat The PixelFormat to convert
- * \param[in] multiplanar V4L2 Multiplanar API support flag
*
- * Multiple V4L2 formats may exist for one PixelFormat when the format uses
- * multiple planes, as V4L2 defines separate 4CCs for contiguous and separate
- * planes formats. Set the \a multiplanar parameter to false to select a format
- * with contiguous planes, or to true to select a format with non-contiguous
- * planes.
+ * Multiple V4L2 formats may exist for one PixelFormat as V4L2 defines separate
+ * 4CCs for contiguous and non-contiguous versions of the same image format.
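+ *
+ * A usage sketch iterating over the alternatives (the chosen PixelFormat is
+ * illustrative only):
+ * \code
+ * for (const V4L2PixelFormat &v4l2Format :
+ *      V4L2PixelFormat::fromPixelFormat(formats::NV12)) {
+ *     // Try v4l2Format against the formats supported by the device.
+ * }
+ * \endcode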
*
- * \return The V4L2PixelFormat corresponding to \a pixelFormat
+ * \return The list of V4L2PixelFormat corresponding to \a pixelFormat
*/
-V4L2PixelFormat V4L2PixelFormat::fromPixelFormat(const PixelFormat &pixelFormat,
- bool multiplanar)
+const std::vector<V4L2PixelFormat> &
+V4L2PixelFormat::fromPixelFormat(const PixelFormat &pixelFormat)
{
+ static const std::vector<V4L2PixelFormat> empty;
+
const PixelFormatInfo &info = PixelFormatInfo::info(pixelFormat);
if (!info.isValid())
- return V4L2PixelFormat();
+ return empty;
- return multiplanar ? info.v4l2Formats.multi : info.v4l2Formats.single;
+ return info.v4l2Formats;
}
/**
diff --git a/src/libcamera/v4l2_subdevice.cpp b/src/libcamera/v4l2_subdevice.cpp
index 98a3911a..6da77775 100644
--- a/src/libcamera/v4l2_subdevice.cpp
+++ b/src/libcamera/v4l2_subdevice.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_subdevice.cpp - V4L2 Subdevice
+ * V4L2 Subdevice
*/
#include "libcamera/internal/v4l2_subdevice.h"
@@ -23,6 +23,7 @@
#include <libcamera/base/log.h>
#include <libcamera/base/utils.h>
+#include "libcamera/internal/formats.h"
#include "libcamera/internal/media_device.h"
#include "libcamera/internal/media_object.h"
@@ -35,105 +36,727 @@ namespace libcamera {
LOG_DECLARE_CATEGORY(V4L2)
-namespace {
-
-/*
- * \struct V4L2SubdeviceFormatInfo
+/**
+ * \class MediaBusFormatInfo
* \brief Information about media bus formats
- * \param bitsPerPixel Bits per pixel
- * \param name Name of MBUS format
+ *
+ * The MediaBusFormatInfo class groups together information describing a media
+ * bus format. It facilitates handling of media bus formats by providing data
+ * commonly used in pipeline handlers.
+ *
+ * \var MediaBusFormatInfo::name
+ * \brief The format name as a human-readable string, used as the text
+ * representation of the format
+ *
+ * \var MediaBusFormatInfo::code
+ * \brief The media bus format code described by this instance (MEDIA_BUS_FMT_*)
+ *
+ * \var MediaBusFormatInfo::type
+ * \brief The media bus format type
+ *
+ * \var MediaBusFormatInfo::bitsPerPixel
+ * \brief The average number of bits per pixel
+ *
+ * The number of bits per pixel averages the total number of bits for all
+ * colour components over the whole image, excluding any padding bits or
+ * padding pixels.
+ *
+ * For formats that transmit multiple or fractional pixels per sample, the
+ * value will differ from the bus width.
+ *
+ * Formats that don't have a fixed number of bits per pixel, such as compressed
+ * formats, or device-specific embedded data formats, report 0 in this field.
+ *
+ * \var MediaBusFormatInfo::colourEncoding
+ * \brief The colour encoding type
+ *
+ * This field is valid for Type::Image formats only.
*/
-struct V4L2SubdeviceFormatInfo {
- unsigned int bitsPerPixel;
- const char *name;
-};
-/*
- * \var formatInfoMap
- * \brief A map that associates V4L2SubdeviceFormatInfo struct to V4L2 media
- * bus codes
- */
-const std::map<uint32_t, V4L2SubdeviceFormatInfo> formatInfoMap = {
- { MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE, { 16, "RGB444_2X8_PADHI_BE" } },
- { MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE, { 16, "RGB444_2X8_PADHI_LE" } },
- { MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE, { 16, "RGB555_2X8_PADHI_BE" } },
- { MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE, { 16, "RGB555_2X8_PADHI_LE" } },
- { MEDIA_BUS_FMT_RGB565_1X16, { 16, "RGB565_1X16" } },
- { MEDIA_BUS_FMT_BGR565_2X8_BE, { 16, "BGR565_2X8_BE" } },
- { MEDIA_BUS_FMT_BGR565_2X8_LE, { 16, "BGR565_2X8_LE" } },
- { MEDIA_BUS_FMT_RGB565_2X8_BE, { 16, "RGB565_2X8_BE" } },
- { MEDIA_BUS_FMT_RGB565_2X8_LE, { 16, "RGB565_2X8_LE" } },
- { MEDIA_BUS_FMT_RGB666_1X18, { 18, "RGB666_1X18" } },
- { MEDIA_BUS_FMT_RGB888_1X24, { 24, "RGB888_1X24" } },
- { MEDIA_BUS_FMT_RGB888_2X12_BE, { 24, "RGB888_2X12_BE" } },
- { MEDIA_BUS_FMT_RGB888_2X12_LE, { 24, "RGB888_2X12_LE" } },
- { MEDIA_BUS_FMT_ARGB8888_1X32, { 32, "ARGB8888_1X32" } },
- { MEDIA_BUS_FMT_Y8_1X8, { 8, "Y8_1X8" } },
- { MEDIA_BUS_FMT_UV8_1X8, { 8, "UV8_1X8" } },
- { MEDIA_BUS_FMT_UYVY8_1_5X8, { 12, "UYVY8_1_5X8" } },
- { MEDIA_BUS_FMT_VYUY8_1_5X8, { 12, "VYUY8_1_5X8" } },
- { MEDIA_BUS_FMT_YUYV8_1_5X8, { 12, "YUYV8_1_5X8" } },
- { MEDIA_BUS_FMT_YVYU8_1_5X8, { 12, "YVYU8_1_5X8" } },
- { MEDIA_BUS_FMT_UYVY8_2X8, { 16, "UYVY8_2X8" } },
- { MEDIA_BUS_FMT_VYUY8_2X8, { 16, "VYUY8_2X8" } },
- { MEDIA_BUS_FMT_YUYV8_2X8, { 16, "YUYV8_2X8" } },
- { MEDIA_BUS_FMT_YVYU8_2X8, { 16, "YVYU8_2X8" } },
- { MEDIA_BUS_FMT_Y10_1X10, { 10, "Y10_1X10" } },
- { MEDIA_BUS_FMT_UYVY10_2X10, { 20, "UYVY10_2X10" } },
- { MEDIA_BUS_FMT_VYUY10_2X10, { 20, "VYUY10_2X10" } },
- { MEDIA_BUS_FMT_YUYV10_2X10, { 20, "YUYV10_2X10" } },
- { MEDIA_BUS_FMT_YVYU10_2X10, { 20, "YVYU10_2X10" } },
- { MEDIA_BUS_FMT_Y12_1X12, { 12, "Y12_1X12" } },
- { MEDIA_BUS_FMT_UYVY8_1X16, { 16, "UYVY8_1X16" } },
- { MEDIA_BUS_FMT_VYUY8_1X16, { 16, "VYUY8_1X16" } },
- { MEDIA_BUS_FMT_YUYV8_1X16, { 16, "YUYV8_1X16" } },
- { MEDIA_BUS_FMT_YVYU8_1X16, { 16, "YVYU8_1X16" } },
- { MEDIA_BUS_FMT_YDYUYDYV8_1X16, { 16, "YDYUYDYV8_1X16" } },
- { MEDIA_BUS_FMT_UYVY10_1X20, { 20, "UYVY10_1X20" } },
- { MEDIA_BUS_FMT_VYUY10_1X20, { 20, "VYUY10_1X20" } },
- { MEDIA_BUS_FMT_YUYV10_1X20, { 20, "YUYV10_1X20" } },
- { MEDIA_BUS_FMT_YVYU10_1X20, { 20, "YVYU10_1X20" } },
- { MEDIA_BUS_FMT_YUV8_1X24, { 24, "YUV8_1X24" } },
- { MEDIA_BUS_FMT_YUV10_1X30, { 30, "YUV10_1X30" } },
- { MEDIA_BUS_FMT_AYUV8_1X32, { 32, "AYUV8_1X32" } },
- { MEDIA_BUS_FMT_UYVY12_2X12, { 24, "UYVY12_2X12" } },
- { MEDIA_BUS_FMT_VYUY12_2X12, { 24, "VYUY12_2X12" } },
- { MEDIA_BUS_FMT_YUYV12_2X12, { 24, "YUYV12_2X12" } },
- { MEDIA_BUS_FMT_YVYU12_2X12, { 24, "YVYU12_2X12" } },
- { MEDIA_BUS_FMT_UYVY12_1X24, { 24, "UYVY12_1X24" } },
- { MEDIA_BUS_FMT_VYUY12_1X24, { 24, "VYUY12_1X24" } },
- { MEDIA_BUS_FMT_YUYV12_1X24, { 24, "YUYV12_1X24" } },
- { MEDIA_BUS_FMT_YVYU12_1X24, { 24, "YVYU12_1X24" } },
- { MEDIA_BUS_FMT_SBGGR8_1X8, { 8, "SBGGR8_1X8" } },
- { MEDIA_BUS_FMT_SGBRG8_1X8, { 8, "SGBRG8_1X8" } },
- { MEDIA_BUS_FMT_SGRBG8_1X8, { 8, "SGRBG8_1X8" } },
- { MEDIA_BUS_FMT_SRGGB8_1X8, { 8, "SRGGB8_1X8" } },
- { MEDIA_BUS_FMT_SBGGR10_ALAW8_1X8, { 8, "SBGGR10_ALAW8_1X8" } },
- { MEDIA_BUS_FMT_SGBRG10_ALAW8_1X8, { 8, "SGBRG10_ALAW8_1X8" } },
- { MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8, { 8, "SGRBG10_ALAW8_1X8" } },
- { MEDIA_BUS_FMT_SRGGB10_ALAW8_1X8, { 8, "SRGGB10_ALAW8_1X8" } },
- { MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8, { 8, "SBGGR10_DPCM8_1X8" } },
- { MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8, { 8, "SGBRG10_DPCM8_1X8" } },
- { MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8, { 8, "SGRBG10_DPCM8_1X8" } },
- { MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8, { 8, "SRGGB10_DPCM8_1X8" } },
- { MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE, { 16, "SBGGR10_2X8_PADHI_BE" } },
- { MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE, { 16, "SBGGR10_2X8_PADHI_LE" } },
- { MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE, { 16, "SBGGR10_2X8_PADLO_BE" } },
- { MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE, { 16, "SBGGR10_2X8_PADLO_LE" } },
- { MEDIA_BUS_FMT_SBGGR10_1X10, { 10, "SBGGR10_1X10" } },
- { MEDIA_BUS_FMT_SGBRG10_1X10, { 10, "SGBRG10_1X10" } },
- { MEDIA_BUS_FMT_SGRBG10_1X10, { 10, "SGRBG10_1X10" } },
- { MEDIA_BUS_FMT_SRGGB10_1X10, { 10, "SRGGB10_1X10" } },
- { MEDIA_BUS_FMT_SBGGR12_1X12, { 12, "SBGGR12_1X12" } },
- { MEDIA_BUS_FMT_SGBRG12_1X12, { 12, "SGBRG12_1X12" } },
- { MEDIA_BUS_FMT_SGRBG12_1X12, { 12, "SGRBG12_1X12" } },
- { MEDIA_BUS_FMT_SRGGB12_1X12, { 12, "SRGGB12_1X12" } },
- { MEDIA_BUS_FMT_AHSV8888_1X32, { 32, "AHSV8888_1X32" } },
+/**
+ * \enum MediaBusFormatInfo::Type
+ * \brief The format type
+ *
+ * \var MediaBusFormatInfo::Type::Image
+ * \brief The format describes image data
+ *
+ * \var MediaBusFormatInfo::Type::Metadata
+ * \brief The format describes generic metadata
+ *
+ * \var MediaBusFormatInfo::Type::EmbeddedData
+ * \brief The format describes sensor embedded data
+ */
+
+namespace {
+
+const std::map<uint32_t, MediaBusFormatInfo> mediaBusFormatInfo{
+ /* This table is sorted to match the order in linux/media-bus-format.h */
+ { MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE, {
+ .name = "RGB444_2X8_PADHI_BE",
+ .code = MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE, {
+ .name = "RGB444_2X8_PADHI_LE",
+ .code = MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE, {
+ .name = "RGB555_2X8_PADHI_BE",
+ .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE, {
+ .name = "RGB555_2X8_PADHI_LE",
+ .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB565_1X16, {
+ .name = "RGB565_1X16",
+ .code = MEDIA_BUS_FMT_RGB565_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_BGR565_2X8_BE, {
+ .name = "BGR565_2X8_BE",
+ .code = MEDIA_BUS_FMT_BGR565_2X8_BE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_BGR565_2X8_LE, {
+ .name = "BGR565_2X8_LE",
+ .code = MEDIA_BUS_FMT_BGR565_2X8_LE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB565_2X8_BE, {
+ .name = "RGB565_2X8_BE",
+ .code = MEDIA_BUS_FMT_RGB565_2X8_BE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB565_2X8_LE, {
+ .name = "RGB565_2X8_LE",
+ .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB666_1X18, {
+ .name = "RGB666_1X18",
+ .code = MEDIA_BUS_FMT_RGB666_1X18,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 18,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_BGR888_1X24, {
+ .name = "BGR888_1X24",
+ .code = MEDIA_BUS_FMT_BGR888_1X24,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB888_1X24, {
+ .name = "RGB888_1X24",
+ .code = MEDIA_BUS_FMT_RGB888_1X24,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB888_2X12_BE, {
+ .name = "RGB888_2X12_BE",
+ .code = MEDIA_BUS_FMT_RGB888_2X12_BE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB888_2X12_LE, {
+ .name = "RGB888_2X12_LE",
+ .code = MEDIA_BUS_FMT_RGB888_2X12_LE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_ARGB8888_1X32, {
+ .name = "ARGB8888_1X32",
+ .code = MEDIA_BUS_FMT_ARGB8888_1X32,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 32,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_Y8_1X8, {
+ .name = "Y8_1X8",
+ .code = MEDIA_BUS_FMT_Y8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_UV8_1X8, {
+ .name = "UV8_1X8",
+ .code = MEDIA_BUS_FMT_UV8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_UYVY8_1_5X8, {
+ .name = "UYVY8_1_5X8",
+ .code = MEDIA_BUS_FMT_UYVY8_1_5X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_VYUY8_1_5X8, {
+ .name = "VYUY8_1_5X8",
+ .code = MEDIA_BUS_FMT_VYUY8_1_5X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUYV8_1_5X8, {
+ .name = "YUYV8_1_5X8",
+ .code = MEDIA_BUS_FMT_YUYV8_1_5X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YVYU8_1_5X8, {
+ .name = "YVYU8_1_5X8",
+ .code = MEDIA_BUS_FMT_YVYU8_1_5X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_UYVY8_2X8, {
+ .name = "UYVY8_2X8",
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_VYUY8_2X8, {
+ .name = "VYUY8_2X8",
+ .code = MEDIA_BUS_FMT_VYUY8_2X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUYV8_2X8, {
+ .name = "YUYV8_2X8",
+ .code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YVYU8_2X8, {
+ .name = "YVYU8_2X8",
+ .code = MEDIA_BUS_FMT_YVYU8_2X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_Y10_1X10, {
+ .name = "Y10_1X10",
+ .code = MEDIA_BUS_FMT_Y10_1X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_UYVY10_2X10, {
+ .name = "UYVY10_2X10",
+ .code = MEDIA_BUS_FMT_UYVY10_2X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_VYUY10_2X10, {
+ .name = "VYUY10_2X10",
+ .code = MEDIA_BUS_FMT_VYUY10_2X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUYV10_2X10, {
+ .name = "YUYV10_2X10",
+ .code = MEDIA_BUS_FMT_YUYV10_2X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YVYU10_2X10, {
+ .name = "YVYU10_2X10",
+ .code = MEDIA_BUS_FMT_YVYU10_2X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_Y12_1X12, {
+ .name = "Y12_1X12",
+ .code = MEDIA_BUS_FMT_Y12_1X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_Y16_1X16, {
+ .name = "Y16_1X16",
+ .code = MEDIA_BUS_FMT_Y16_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_UYVY8_1X16, {
+ .name = "UYVY8_1X16",
+ .code = MEDIA_BUS_FMT_UYVY8_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_VYUY8_1X16, {
+ .name = "VYUY8_1X16",
+ .code = MEDIA_BUS_FMT_VYUY8_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUYV8_1X16, {
+ .name = "YUYV8_1X16",
+ .code = MEDIA_BUS_FMT_YUYV8_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YVYU8_1X16, {
+ .name = "YVYU8_1X16",
+ .code = MEDIA_BUS_FMT_YVYU8_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YDYUYDYV8_1X16, {
+ .name = "YDYUYDYV8_1X16",
+ .code = MEDIA_BUS_FMT_YDYUYDYV8_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_UYVY10_1X20, {
+ .name = "UYVY10_1X20",
+ .code = MEDIA_BUS_FMT_UYVY10_1X20,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_VYUY10_1X20, {
+ .name = "VYUY10_1X20",
+ .code = MEDIA_BUS_FMT_VYUY10_1X20,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUYV10_1X20, {
+ .name = "YUYV10_1X20",
+ .code = MEDIA_BUS_FMT_YUYV10_1X20,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YVYU10_1X20, {
+ .name = "YVYU10_1X20",
+ .code = MEDIA_BUS_FMT_YVYU10_1X20,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUV8_1X24, {
+ .name = "YUV8_1X24",
+ .code = MEDIA_BUS_FMT_YUV8_1X24,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUV10_1X30, {
+ .name = "YUV10_1X30",
+ .code = MEDIA_BUS_FMT_YUV10_1X30,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 30,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_AYUV8_1X32, {
+ .name = "AYUV8_1X32",
+ .code = MEDIA_BUS_FMT_AYUV8_1X32,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 32,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_UYVY12_2X12, {
+ .name = "UYVY12_2X12",
+ .code = MEDIA_BUS_FMT_UYVY12_2X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_VYUY12_2X12, {
+ .name = "VYUY12_2X12",
+ .code = MEDIA_BUS_FMT_VYUY12_2X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUYV12_2X12, {
+ .name = "YUYV12_2X12",
+ .code = MEDIA_BUS_FMT_YUYV12_2X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YVYU12_2X12, {
+ .name = "YVYU12_2X12",
+ .code = MEDIA_BUS_FMT_YVYU12_2X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_UYVY12_1X24, {
+ .name = "UYVY12_1X24",
+ .code = MEDIA_BUS_FMT_UYVY12_1X24,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_VYUY12_1X24, {
+ .name = "VYUY12_1X24",
+ .code = MEDIA_BUS_FMT_VYUY12_1X24,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUYV12_1X24, {
+ .name = "YUYV12_1X24",
+ .code = MEDIA_BUS_FMT_YUYV12_1X24,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YVYU12_1X24, {
+ .name = "YVYU12_1X24",
+ .code = MEDIA_BUS_FMT_YVYU12_1X24,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_SBGGR8_1X8, {
+ .name = "SBGGR8_1X8",
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGBRG8_1X8, {
+ .name = "SGBRG8_1X8",
+ .code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGRBG8_1X8, {
+ .name = "SGRBG8_1X8",
+ .code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SRGGB8_1X8, {
+ .name = "SRGGB8_1X8",
+ .code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR10_ALAW8_1X8, {
+ .name = "SBGGR10_ALAW8_1X8",
+ .code = MEDIA_BUS_FMT_SBGGR10_ALAW8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGBRG10_ALAW8_1X8, {
+ .name = "SGBRG10_ALAW8_1X8",
+ .code = MEDIA_BUS_FMT_SGBRG10_ALAW8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8, {
+ .name = "SGRBG10_ALAW8_1X8",
+ .code = MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SRGGB10_ALAW8_1X8, {
+ .name = "SRGGB10_ALAW8_1X8",
+ .code = MEDIA_BUS_FMT_SRGGB10_ALAW8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8, {
+ .name = "SBGGR10_DPCM8_1X8",
+ .code = MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8, {
+ .name = "SGBRG10_DPCM8_1X8",
+ .code = MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8, {
+ .name = "SGRBG10_DPCM8_1X8",
+ .code = MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8, {
+ .name = "SRGGB10_DPCM8_1X8",
+ .code = MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE, {
+ .name = "SBGGR10_2X8_PADHI_BE",
+ .code = MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE, {
+ .name = "SBGGR10_2X8_PADHI_LE",
+ .code = MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE, {
+ .name = "SBGGR10_2X8_PADLO_BE",
+ .code = MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE, {
+ .name = "SBGGR10_2X8_PADLO_LE",
+ .code = MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR10_1X10, {
+ .name = "SBGGR10_1X10",
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, {
+ .name = "SGBRG10_1X10",
+ .code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, {
+ .name = "SGRBG10_1X10",
+ .code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SRGGB10_1X10, {
+ .name = "SRGGB10_1X10",
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR12_1X12, {
+ .name = "SBGGR12_1X12",
+ .code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGBRG12_1X12, {
+ .name = "SGBRG12_1X12",
+ .code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGRBG12_1X12, {
+ .name = "SGRBG12_1X12",
+ .code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SRGGB12_1X12, {
+ .name = "SRGGB12_1X12",
+ .code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR14_1X14, {
+ .name = "SBGGR14_1X14",
+ .code = MEDIA_BUS_FMT_SBGGR14_1X14,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGBRG14_1X14, {
+ .name = "SGBRG14_1X14",
+ .code = MEDIA_BUS_FMT_SGBRG14_1X14,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGRBG14_1X14, {
+ .name = "SGRBG14_1X14",
+ .code = MEDIA_BUS_FMT_SGRBG14_1X14,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SRGGB14_1X14, {
+ .name = "SRGGB14_1X14",
+ .code = MEDIA_BUS_FMT_SRGGB14_1X14,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR16_1X16, {
+ .name = "SBGGR16_1X16",
+ .code = MEDIA_BUS_FMT_SBGGR16_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW
+ } },
+ { MEDIA_BUS_FMT_SGBRG16_1X16, {
+ .name = "SGBRG16_1X16",
+ .code = MEDIA_BUS_FMT_SGBRG16_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW
+ } },
+ { MEDIA_BUS_FMT_SGRBG16_1X16, {
+ .name = "SGRBG16_1X16",
+ .code = MEDIA_BUS_FMT_SGRBG16_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW
+ } },
+ { MEDIA_BUS_FMT_SRGGB16_1X16, {
+ .name = "SRGGB16_1X16",
+ .code = MEDIA_BUS_FMT_SRGGB16_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW
+ } },
+ /* \todo Clarify colour encoding for HSV formats */
+ { MEDIA_BUS_FMT_AHSV8888_1X32, {
+ .name = "AHSV8888_1X32",
+ .code = MEDIA_BUS_FMT_AHSV8888_1X32,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 32,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_JPEG_1X8, {
+ .name = "JPEG_1X8",
+ .code = MEDIA_BUS_FMT_JPEG_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_METADATA_FIXED, {
+ .name = "METADATA_FIXED",
+ .code = MEDIA_BUS_FMT_METADATA_FIXED,
+ .type = MediaBusFormatInfo::Type::Metadata,
+ .bitsPerPixel = 0,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
};
} /* namespace */
/**
+ * \fn bool MediaBusFormatInfo::isValid() const
+ * \brief Check if the media bus format info is valid
+ * \return True if the media bus format info is valid, false otherwise
+ */
+
+/**
+ * \brief Retrieve information about a media bus format
+ * \param[in] code The media bus format code
+ * \return The MediaBusFormatInfo describing the \a code if known, or an invalid
+ * MediaBusFormatInfo otherwise
+ */
+const MediaBusFormatInfo &MediaBusFormatInfo::info(uint32_t code)
+{
+ static const MediaBusFormatInfo invalid{};
+
+ const auto it = mediaBusFormatInfo.find(code);
+ if (it == mediaBusFormatInfo.end()) {
+ LOG(V4L2, Warning)
+ << "Unsupported media bus format "
+ << utils::hex(code, 4);
+ return invalid;
+ }
+
+ return it->second;
+}
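For illustration only (not part of this patch), a caller can look up the
descriptor for a media bus code and validate the result before using it:

        const MediaBusFormatInfo &info =
                MediaBusFormatInfo::info(MEDIA_BUS_FMT_SBGGR10_1X10);
        if (info.isValid())
                LOG(V4L2, Debug) << info.name << ": "
                                 << info.bitsPerPixel << " bits per pixel";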
+
+/**
+ * \struct V4L2SubdeviceCapability
+ * \brief struct v4l2_subdev_capability object wrapper and helpers
+ *
+ * The V4L2SubdeviceCapability structure manages the information returned by the
+ * VIDIOC_SUBDEV_QUERYCAP ioctl.
+ */
+
+/**
+ * \fn V4L2SubdeviceCapability::isReadOnly()
+ * \brief Retrieve if a subdevice is registered as read-only
+ *
+ * A V4L2 subdevice is registered as read-only if V4L2_SUBDEV_CAP_RO_SUBDEV
+ * is listed as part of its capabilities.
+ *
+ * \return True if the subdevice is registered as read-only, false otherwise
+ */
+
+/**
+ * \fn V4L2SubdeviceCapability::hasStreams()
+ * \brief Retrieve if a subdevice supports the V4L2 streams API
+ * \return True if the subdevice supports the streams API, false otherwise
+ */
+
+/**
* \struct V4L2SubdeviceFormat
* \brief The V4L2 sub-device image format and sizes
*
@@ -162,7 +785,7 @@ const std::map<uint32_t, V4L2SubdeviceFormatInfo> formatInfoMap = {
*/
/**
- * \var V4L2SubdeviceFormat::mbus_code
+ * \var V4L2SubdeviceFormat::code
* \brief The image format bus code
*/
@@ -199,23 +822,6 @@ const std::string V4L2SubdeviceFormat::toString() const
}
/**
- * \brief Retrieve the number of bits per pixel for the V4L2 subdevice format
- * \return The number of bits per pixel for the format, or 0 if the format is
- * not supported
- */
-uint8_t V4L2SubdeviceFormat::bitsPerPixel() const
-{
- const auto it = formatInfoMap.find(mbus_code);
- if (it == formatInfoMap.end()) {
- LOG(V4L2, Error) << "No information available for format '"
- << *this << "'";
- return 0;
- }
-
- return it->second.bitsPerPixel;
-}
-
-/**
* \brief Insert a text representation of a V4L2SubdeviceFormat into an output
* stream
* \param[in] out The output stream
@@ -226,10 +832,10 @@ std::ostream &operator<<(std::ostream &out, const V4L2SubdeviceFormat &f)
{
out << f.size << "-";
- const auto it = formatInfoMap.find(f.mbus_code);
+ const auto it = mediaBusFormatInfo.find(f.code);
- if (it == formatInfoMap.end())
- out << utils::hex(f.mbus_code, 4);
+ if (it == mediaBusFormatInfo.end())
+ out << utils::hex(f.code, 4);
else
out << it->second.name;
@@ -265,6 +871,134 @@ std::ostream &operator<<(std::ostream &out, const V4L2SubdeviceFormat &f)
*/
/**
+ * \class V4L2Subdevice::Stream
+ * \brief V4L2 subdevice stream
+ *
+ * This class identifies a subdev stream, by bundling the pad number with the
+ * stream number. It is used in all stream-aware functions of the V4L2Subdevice
+ * class to identify the stream the functions operate on.
+ *
+ * \var V4L2Subdevice::Stream::pad
+ * \brief The 0-indexed pad number
+ *
+ * \var V4L2Subdevice::Stream::stream
+ * \brief The stream number
+ */
+
+/**
+ * \fn V4L2Subdevice::Stream::Stream()
+ * \brief Construct a Stream with pad and stream set to 0
+ */
+
+/**
+ * \fn V4L2Subdevice::Stream::Stream(unsigned int pad, unsigned int stream)
+ * \brief Construct a Stream with a given \a pad and \a stream number
+ * \param[in] pad The 0-indexed pad number

+ * \param[in] stream The stream number
+ */
+
+/**
+ * \brief Compare streams for equality
+ * \return True if the two streams are equal, false otherwise
+ */
+bool operator==(const V4L2Subdevice::Stream &lhs, const V4L2Subdevice::Stream &rhs)
+{
+ return lhs.pad == rhs.pad && lhs.stream == rhs.stream;
+}
+
+/**
+ * \fn bool operator!=(const V4L2Subdevice::Stream &lhs, const V4L2Subdevice::Stream &rhs)
+ * \brief Compare streams for inequality
+ * \return True if the two streams are not equal, false otherwise
+ */
+
+/**
+ * \brief Insert a text representation of a V4L2Subdevice::Stream into an
+ * output stream
+ * \param[in] out The output stream
+ * \param[in] stream The V4L2Subdevice::Stream
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const V4L2Subdevice::Stream &stream)
+{
+ out << stream.pad << "/" << stream.stream;
+
+ return out;
+}
+
+/**
+ * \class V4L2Subdevice::Route
+ * \brief V4L2 subdevice routing table entry
+ *
+ * This class models a route in the subdevice routing table. It is similar to
+ * the v4l2_subdev_route structure, but uses the V4L2Subdevice::Stream class
+ * for easier usage with the V4L2Subdevice stream-aware functions.
+ *
+ * \var V4L2Subdevice::Route::sink
+ * \brief The sink stream of the route
+ *
+ * \var V4L2Subdevice::Route::source
+ * \brief The source stream of the route
+ *
+ * \var V4L2Subdevice::Route::flags
+ * \brief The route flags (V4L2_SUBDEV_ROUTE_FL_*)
+ */
+
+/**
+ * \fn V4L2Subdevice::Route::Route()
+ * \brief Construct a Route with default streams
+ */
+
+/**
+ * \fn V4L2Subdevice::Route::Route(const Stream &sink, const Stream &source,
+ * uint32_t flags)
+ * \brief Construct a Route from \a sink to \a source
+ * \param[in] sink The sink stream
+ * \param[in] source The source stream
+ * \param[in] flags The route flags
+ */
+
+/**
+ * \brief Insert a text representation of a V4L2Subdevice::Route into an
+ * output stream
+ * \param[in] out The output stream
+ * \param[in] route The V4L2Subdevice::Route
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const V4L2Subdevice::Route &route)
+{
+ out << route.sink << " -> " << route.source
+ << " (" << utils::hex(route.flags) << ")";
+
+ return out;
+}
+
+/**
+ * \typedef V4L2Subdevice::Routing
+ * \brief V4L2 subdevice routing table
+ *
+ * This class stores a subdevice routing table as a vector of routes.
+ */
+
+/**
+ * \brief Insert a text representation of a V4L2Subdevice::Routing into an
+ * output stream
+ * \param[in] out The output stream
+ * \param[in] routing The V4L2Subdevice::Routing
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const V4L2Subdevice::Routing &routing)
+{
+ for (const auto &[i, route] : utils::enumerate(routing)) {
+ out << "[" << i << "] " << route;
+ if (i != routing.size() - 1)
+ out << ", ";
+ }
+
+ return out;
+}
+
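As a sketch only, a Route can be built from two Streams and logged through the
insertion operators introduced above; the flags value comes straight from the
kernel V4L2_SUBDEV_ROUTE_FL_* definitions:

        V4L2Subdevice::Stream sink{ 0, 0 };
        V4L2Subdevice::Stream source{ 1, 0 };
        V4L2Subdevice::Route route{ sink, source, V4L2_SUBDEV_ROUTE_FL_ACTIVE };

        /* Prints something like "0/0 -> 1/0 (0x00000001)". */
        LOG(V4L2, Debug) << route;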
+/**
* \brief Create a V4L2 subdevice from a MediaEntity using its device node
* path
*/
@@ -284,7 +1018,40 @@ V4L2Subdevice::~V4L2Subdevice()
*/
int V4L2Subdevice::open()
{
- return V4L2Device::open(O_RDWR);
+ int ret = V4L2Device::open(O_RDWR);
+ if (ret)
+ return ret;
+
+ /*
+ * Try to query the subdev capabilities. The VIDIOC_SUBDEV_QUERYCAP API
+ * was introduced in kernel v5.8; ENOTTY errors must be ignored to
+ * support older kernels.
+ */
+ caps_ = {};
+ ret = ioctl(VIDIOC_SUBDEV_QUERYCAP, &caps_);
+ if (ret < 0 && errno != ENOTTY) {
+ ret = -errno;
+ LOG(V4L2, Error)
+ << "Unable to query capabilities: " << strerror(-ret);
+ return ret;
+ }
+
+ /* If the subdev supports streams, enable the streams API. */
+ if (caps_.hasStreams()) {
+ struct v4l2_subdev_client_capability clientCaps{};
+ clientCaps.capabilities = V4L2_SUBDEV_CLIENT_CAP_STREAMS;
+
+ ret = ioctl(VIDIOC_SUBDEV_S_CLIENT_CAP, &clientCaps);
+ if (ret < 0) {
+ ret = -errno;
+ LOG(V4L2, Error)
+ << "Unable to set client capabilities: "
+ << strerror(-ret);
+ return ret;
+ }
+ }
+
+ return 0;
}
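A minimal usage sketch, assuming a MediaDevice pointer named media and a
hypothetical entity name, that opens a subdevice and checks the capabilities
queried by open():

        std::unique_ptr<V4L2Subdevice> subdev =
                V4L2Subdevice::fromEntityName(media, "imx219 10-0010");
        if (!subdev || subdev->open() < 0)
                return;

        if (subdev->caps().hasStreams())
                LOG(V4L2, Debug) << "Streams API available";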
/**
@@ -295,7 +1062,7 @@ int V4L2Subdevice::open()
/**
* \brief Get selection rectangle \a rect for \a target
- * \param[in] pad The 0-indexed pad number the rectangle is retrieved from
+ * \param[in] stream The stream the rectangle is retrieved from
* \param[in] target The selection target defined by the V4L2_SEL_TGT_* flags
* \param[out] rect The retrieved selection rectangle
*
@@ -303,13 +1070,14 @@ int V4L2Subdevice::open()
*
* \return 0 on success or a negative error code otherwise
*/
-int V4L2Subdevice::getSelection(unsigned int pad, unsigned int target,
+int V4L2Subdevice::getSelection(const Stream &stream, unsigned int target,
Rectangle *rect)
{
struct v4l2_subdev_selection sel = {};
sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- sel.pad = pad;
+ sel.pad = stream.pad;
+ sel.stream = stream.stream;
sel.target = target;
sel.flags = 0;
@@ -317,7 +1085,7 @@ int V4L2Subdevice::getSelection(unsigned int pad, unsigned int target,
if (ret < 0) {
LOG(V4L2, Error)
<< "Unable to get rectangle " << target << " on pad "
- << pad << ": " << strerror(-ret);
+ << stream << ": " << strerror(-ret);
return ret;
}
@@ -330,8 +1098,19 @@ int V4L2Subdevice::getSelection(unsigned int pad, unsigned int target,
}
/**
+ * \fn V4L2Subdevice::getSelection(unsigned int pad, unsigned int target,
+ * Rectangle *rect)
+ * \brief Get selection rectangle \a rect for \a target
+ * \param[in] pad The 0-indexed pad number the rectangle is retrieved from
+ * \param[in] target The selection target defined by the V4L2_SEL_TGT_* flags
+ * \param[out] rect The retrieved selection rectangle
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+
+/**
* \brief Set selection rectangle \a rect for \a target
- * \param[in] pad The 0-indexed pad number the rectangle is to be applied to
+ * \param[in] stream The stream the rectangle is to be applied to
* \param[in] target The selection target defined by the V4L2_SEL_TGT_* flags
* \param[inout] rect The selection rectangle to be applied
*
@@ -339,13 +1118,14 @@ int V4L2Subdevice::getSelection(unsigned int pad, unsigned int target,
*
* \return 0 on success or a negative error code otherwise
*/
-int V4L2Subdevice::setSelection(unsigned int pad, unsigned int target,
+int V4L2Subdevice::setSelection(const Stream &stream, unsigned int target,
Rectangle *rect)
{
struct v4l2_subdev_selection sel = {};
sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- sel.pad = pad;
+ sel.pad = stream.pad;
+ sel.stream = stream.stream;
sel.target = target;
sel.flags = 0;
@@ -358,7 +1138,7 @@ int V4L2Subdevice::setSelection(unsigned int pad, unsigned int target,
if (ret < 0) {
LOG(V4L2, Error)
<< "Unable to set rectangle " << target << " on pad "
- << pad << ": " << strerror(-ret);
+ << stream << ": " << strerror(-ret);
return ret;
}
@@ -369,26 +1149,40 @@ int V4L2Subdevice::setSelection(unsigned int pad, unsigned int target,
return 0;
}
+
/**
- * \brief Enumerate all media bus codes and frame sizes on a \a pad
- * \param[in] pad The 0-indexed pad number to enumerate formats on
+ * \fn V4L2Subdevice::setSelection(unsigned int pad, unsigned int target,
+ * Rectangle *rect)
+ * \brief Set selection rectangle \a rect for \a target
+ * \param[in] pad The 0-indexed pad number the rectangle is to be applied to
+ * \param[in] target The selection target defined by the V4L2_SEL_TGT_* flags
+ * \param[inout] rect The selection rectangle to be applied
+ *
+ * \todo Define a V4L2SelectionTarget enum for the selection target
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+
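For illustration, and assuming an already open V4L2Subdevice named subdev, the
stream-aware overload can apply a crop rectangle to sink pad 0, stream 0:

        Rectangle crop{ 0, 0, 1920, 1080 };
        int ret = subdev->setSelection({ 0, 0 }, V4L2_SEL_TGT_CROP, &crop);
        if (ret)
                LOG(V4L2, Error) << "Failed to apply crop rectangle";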
+/**
+ * \brief Enumerate all media bus codes and frame sizes on a \a stream
+ * \param[in] stream The stream to enumerate formats for
*
* Enumerate all media bus codes and frame sizes supported by the subdevice on
- * a \a pad.
+ * a \a stream.
*
* \return A list of the supported device formats
*/
-V4L2Subdevice::Formats V4L2Subdevice::formats(unsigned int pad)
+V4L2Subdevice::Formats V4L2Subdevice::formats(const Stream &stream)
{
Formats formats;
- if (pad >= entity_->pads().size()) {
- LOG(V4L2, Error) << "Invalid pad: " << pad;
+ if (stream.pad >= entity_->pads().size()) {
+ LOG(V4L2, Error) << "Invalid pad: " << stream.pad;
return {};
}
- for (unsigned int code : enumPadCodes(pad)) {
- std::vector<SizeRange> sizes = enumPadSizes(pad, code);
+ for (unsigned int code : enumPadCodes(stream)) {
+ std::vector<SizeRange> sizes = enumPadSizes(stream, code);
if (sizes.empty())
return {};
@@ -396,7 +1190,7 @@ V4L2Subdevice::Formats V4L2Subdevice::formats(unsigned int pad)
if (!inserted.second) {
LOG(V4L2, Error)
<< "Could not add sizes for media bus code "
- << code << " on pad " << pad;
+ << code << " on pad " << stream.pad;
return {};
}
}
@@ -405,79 +1199,273 @@ V4L2Subdevice::Formats V4L2Subdevice::formats(unsigned int pad)
}
/**
- * \brief Retrieve the image format set on one of the V4L2 subdevice pads
- * \param[in] pad The 0-indexed pad number the format is to be retrieved from
+ * \fn V4L2Subdevice::formats(unsigned int pad)
+ * \brief Enumerate all media bus codes and frame sizes on a \a pad
+ * \param[in] pad The 0-indexed pad number to enumerate formats on
+ *
+ * Enumerate all media bus codes and frame sizes supported by the subdevice on
+ * a \a pad.
+ *
+ * \return A list of the supported device formats
+ */
+
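A short sketch of the stream-aware enumeration, assuming subdev is open;
Formats maps each media bus code to its supported size ranges:

        V4L2Subdevice::Formats formats = subdev->formats({ 0, 0 });
        for (const auto &[code, sizes] : formats)
                LOG(V4L2, Debug) << utils::hex(code, 4) << ": "
                                 << sizes.size() << " size range(s)";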
+std::optional<ColorSpace> V4L2Subdevice::toColorSpace(const v4l2_mbus_framefmt &format) const
+{
+ /*
+ * Only image formats have a color space, for other formats (such as
+ * metadata formats) the color space concept isn't applicable. V4L2
+ * subdev drivers return a colorspace set to V4L2_COLORSPACE_DEFAULT in
+ * that case (as well as for image formats when the driver hasn't
+ * bothered implementing color space support). Check the colorspace
+ * field here and return std::nullopt directly to avoid logging a
+ * warning.
+ */
+ if (format.colorspace == V4L2_COLORSPACE_DEFAULT)
+ return std::nullopt;
+
+ PixelFormatInfo::ColourEncoding colourEncoding;
+ const MediaBusFormatInfo &info = MediaBusFormatInfo::info(format.code);
+ if (info.isValid()) {
+ colourEncoding = info.colourEncoding;
+ } else {
+ LOG(V4L2, Warning)
+ << "Unknown subdev format "
+ << utils::hex(format.code, 4)
+ << ", defaulting to RGB encoding";
+
+ colourEncoding = PixelFormatInfo::ColourEncodingRGB;
+ }
+
+ return V4L2Device::toColorSpace(format, colourEncoding);
+}
+
+/**
+ * \brief Retrieve the image format set on one of the V4L2 subdevice streams
+ * \param[in] stream The stream the format is to be retrieved from
* \param[out] format The image bus format
* \param[in] whence The format to get, \ref V4L2Subdevice::ActiveFormat
* "ActiveFormat" or \ref V4L2Subdevice::TryFormat "TryFormat"
* \return 0 on success or a negative error code otherwise
*/
-int V4L2Subdevice::getFormat(unsigned int pad, V4L2SubdeviceFormat *format,
+int V4L2Subdevice::getFormat(const Stream &stream, V4L2SubdeviceFormat *format,
Whence whence)
{
struct v4l2_subdev_format subdevFmt = {};
- subdevFmt.which = whence == ActiveFormat ? V4L2_SUBDEV_FORMAT_ACTIVE
- : V4L2_SUBDEV_FORMAT_TRY;
- subdevFmt.pad = pad;
+ subdevFmt.which = whence;
+ subdevFmt.pad = stream.pad;
+ subdevFmt.stream = stream.stream;
int ret = ioctl(VIDIOC_SUBDEV_G_FMT, &subdevFmt);
if (ret) {
LOG(V4L2, Error)
- << "Unable to get format on pad " << pad
- << ": " << strerror(-ret);
+ << "Unable to get format on pad " << stream << ": "
+ << strerror(-ret);
return ret;
}
format->size.width = subdevFmt.format.width;
format->size.height = subdevFmt.format.height;
- format->mbus_code = subdevFmt.format.code;
+ format->code = subdevFmt.format.code;
format->colorSpace = toColorSpace(subdevFmt.format);
return 0;
}
/**
+ * \fn V4L2Subdevice::getFormat(unsigned int pad, V4L2SubdeviceFormat *format,
+ * Whence whence)
+ * \brief Retrieve the image format set on one of the V4L2 subdevice pads
+ * \param[in] pad The 0-indexed pad number the format is to be retrieved from
+ * \param[out] format The image bus format
+ * \param[in] whence The format to get, \ref V4L2Subdevice::ActiveFormat
+ * "ActiveFormat" or \ref V4L2Subdevice::TryFormat "TryFormat"
+ * \return 0 on success or a negative error code otherwise
+ */
+
+/**
* \brief Set an image format on one of the V4L2 subdevice pads
- * \param[in] pad The 0-indexed pad number the format is to be applied to
- * \param[inout] format The image bus format to apply to the subdevice's pad
+ * \param[in] stream The stream the format is to be applied to
+ * \param[inout] format The image bus format to apply to the stream
* \param[in] whence The format to set, \ref V4L2Subdevice::ActiveFormat
* "ActiveFormat" or \ref V4L2Subdevice::TryFormat "TryFormat"
*
- * Apply the requested image format to the desired media pad and return the
+ * Apply the requested image format to the desired stream and return the
* actually applied format parameters, as getFormat() would do.
*
* \return 0 on success or a negative error code otherwise
*/
-int V4L2Subdevice::setFormat(unsigned int pad, V4L2SubdeviceFormat *format,
+int V4L2Subdevice::setFormat(const Stream &stream, V4L2SubdeviceFormat *format,
Whence whence)
{
struct v4l2_subdev_format subdevFmt = {};
- subdevFmt.which = whence == ActiveFormat ? V4L2_SUBDEV_FORMAT_ACTIVE
- : V4L2_SUBDEV_FORMAT_TRY;
- subdevFmt.pad = pad;
+ subdevFmt.which = whence;
+ subdevFmt.pad = stream.pad;
+ subdevFmt.stream = stream.stream;
subdevFmt.format.width = format->size.width;
subdevFmt.format.height = format->size.height;
- subdevFmt.format.code = format->mbus_code;
+ subdevFmt.format.code = format->code;
subdevFmt.format.field = V4L2_FIELD_NONE;
- fromColorSpace(format->colorSpace, subdevFmt.format);
+ if (format->colorSpace) {
+ fromColorSpace(format->colorSpace, subdevFmt.format);
+
+ /* The CSC flag is only applicable to source pads. */
+ if (entity_->pads()[stream.pad]->flags() & MEDIA_PAD_FL_SOURCE)
+ subdevFmt.format.flags |= V4L2_MBUS_FRAMEFMT_SET_CSC;
+ }
int ret = ioctl(VIDIOC_SUBDEV_S_FMT, &subdevFmt);
if (ret) {
LOG(V4L2, Error)
- << "Unable to set format on pad " << pad
- << ": " << strerror(-ret);
+ << "Unable to set format on pad " << stream << ": "
+ << strerror(-ret);
return ret;
}
format->size.width = subdevFmt.format.width;
format->size.height = subdevFmt.format.height;
- format->mbus_code = subdevFmt.format.code;
+ format->code = subdevFmt.format.code;
format->colorSpace = toColorSpace(subdevFmt.format);
return 0;
}
/**
+ * \fn V4L2Subdevice::setFormat(unsigned int pad, V4L2SubdeviceFormat *format,
+ * Whence whence)
+ * \brief Set an image format on one of the V4L2 subdevice pads
+ * \param[in] pad The 0-indexed pad number the format is to be applied to
+ * \param[inout] format The image bus format to apply to the subdevice's pad
+ * \param[in] whence The format to set, \ref V4L2Subdevice::ActiveFormat
+ * "ActiveFormat" or \ref V4L2Subdevice::TryFormat "TryFormat"
+ *
+ * Apply the requested image format to the desired media pad and return the
+ * actually applied format parameters, as getFormat() would do.
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+
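As an illustration of the renamed code field and the stream-aware setFormat(),
this sketch requests a 1080p Bayer format on sink pad 0, stream 0 and relies on
the function updating format with what the driver actually applied:

        V4L2SubdeviceFormat format{};
        format.code = MEDIA_BUS_FMT_SRGGB10_1X10;
        format.size = { 1920, 1080 };

        int ret = subdev->setFormat({ 0, 0 }, &format,
                                    V4L2Subdevice::ActiveFormat);
        if (ret)
                return ret;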
+namespace {
+
+void routeFromKernel(V4L2Subdevice::Route &route,
+ const struct v4l2_subdev_route &kroute)
+{
+ route.sink.pad = kroute.sink_pad;
+ route.sink.stream = kroute.sink_stream;
+ route.source.pad = kroute.source_pad;
+ route.source.stream = kroute.source_stream;
+ route.flags = kroute.flags;
+}
+
+void routeToKernel(const V4L2Subdevice::Route &route,
+ struct v4l2_subdev_route &kroute)
+{
+ kroute.sink_pad = route.sink.pad;
+ kroute.sink_stream = route.sink.stream;
+ kroute.source_pad = route.source.pad;
+ kroute.source_stream = route.source.stream;
+ kroute.flags = route.flags;
+}
+
+} /* namespace */
+
+/**
+ * \brief Retrieve the subdevice's internal routing table
+ * \param[out] routing The routing table
+ * \param[in] whence The routing table to get, \ref V4L2Subdevice::ActiveFormat
+ * "ActiveFormat" or \ref V4L2Subdevice::TryFormat "TryFormat"
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+int V4L2Subdevice::getRouting(Routing *routing, Whence whence)
+{
+ routing->clear();
+
+ if (!caps_.hasStreams())
+ return 0;
+
+ struct v4l2_subdev_routing rt = {};
+
+ rt.which = whence;
+
+ int ret = ioctl(VIDIOC_SUBDEV_G_ROUTING, &rt);
+ if (ret == 0 || ret == -ENOTTY)
+ return ret;
+
+ if (ret != -ENOSPC) {
+ LOG(V4L2, Error)
+ << "Failed to retrieve number of routes: "
+ << strerror(-ret);
+ return ret;
+ }
+
+ std::vector<struct v4l2_subdev_route> routes{ rt.num_routes };
+ rt.routes = reinterpret_cast<uintptr_t>(routes.data());
+
+ ret = ioctl(VIDIOC_SUBDEV_G_ROUTING, &rt);
+ if (ret) {
+ LOG(V4L2, Error)
+ << "Failed to retrieve routes: " << strerror(-ret);
+ return ret;
+ }
+
+ if (rt.num_routes != routes.size()) {
+ LOG(V4L2, Error) << "Invalid number of routes";
+ return -EINVAL;
+ }
+
+ routing->resize(rt.num_routes);
+
+ for (const auto &[i, route] : utils::enumerate(routes))
+ routeFromKernel((*routing)[i], route);
+
+ return 0;
+}
+
+/**
+ * \brief Set a routing table on the V4L2 subdevice
+ * \param[inout] routing The routing table
+ * \param[in] whence The routing table to set, \ref V4L2Subdevice::ActiveFormat
+ * "ActiveFormat" or \ref V4L2Subdevice::TryFormat "TryFormat"
+ *
+ * Apply to the V4L2 subdevice the routing table \a routing and update its
+ * content to reflect the actually applied routing table as getRouting() would
+ * do.
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+int V4L2Subdevice::setRouting(Routing *routing, Whence whence)
+{
+ if (!caps_.hasStreams()) {
+ routing->clear();
+ return 0;
+ }
+
+ std::vector<struct v4l2_subdev_route> routes{ routing->size() };
+
+ for (const auto &[i, route] : utils::enumerate(*routing))
+ routeToKernel(route, routes[i]);
+
+ struct v4l2_subdev_routing rt = {};
+ rt.which = whence;
+ rt.num_routes = routes.size();
+ rt.routes = reinterpret_cast<uintptr_t>(routes.data());
+
+ int ret = ioctl(VIDIOC_SUBDEV_S_ROUTING, &rt);
+ if (ret) {
+ LOG(V4L2, Error) << "Failed to set routes: " << strerror(-ret);
+ return ret;
+ }
+
+ routes.resize(rt.num_routes);
+ routing->resize(rt.num_routes);
+
+ for (const auto &[i, route] : utils::enumerate(routes))
+ routeFromKernel((*routing)[i], route);
+
+ return 0;
+}
+
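For illustration only, a pipeline handler might program a single active route
from sink pad 0/stream 0 to source pad 1/stream 0; the table is updated in
place with what the kernel accepted:

        V4L2Subdevice::Routing routing;
        routing.emplace_back(V4L2Subdevice::Stream{ 0, 0 },
                             V4L2Subdevice::Stream{ 1, 0 },
                             V4L2_SUBDEV_ROUTE_FL_ACTIVE);

        int ret = subdev->setRouting(&routing, V4L2Subdevice::ActiveFormat);
        if (ret)
                return ret;

        LOG(V4L2, Debug) << "Applied routing: " << routing;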
+/**
* \brief Retrieve the model name of the device
*
* The model name allows identification of the specific device model. This can
@@ -530,6 +1518,12 @@ const std::string &V4L2Subdevice::model()
}
/**
+ * \fn V4L2Subdevice::caps()
+ * \brief Retrieve the subdevice V4L2 capabilities
+ * \return The subdevice V4L2 capabilities
+ */
+
+/**
* \brief Create a new video subdevice instance from \a entity in media device
* \a media
* \param[in] media The media device where the entity is registered
@@ -553,14 +1547,15 @@ std::string V4L2Subdevice::logPrefix() const
return "'" + entity_->name() + "'";
}
-std::vector<unsigned int> V4L2Subdevice::enumPadCodes(unsigned int pad)
+std::vector<unsigned int> V4L2Subdevice::enumPadCodes(const Stream &stream)
{
std::vector<unsigned int> codes;
int ret;
for (unsigned int index = 0; ; index++) {
struct v4l2_subdev_mbus_code_enum mbusEnum = {};
- mbusEnum.pad = pad;
+ mbusEnum.pad = stream.pad;
+ mbusEnum.stream = stream.stream;
mbusEnum.index = index;
mbusEnum.which = V4L2_SUBDEV_FORMAT_ACTIVE;
@@ -573,7 +1568,7 @@ std::vector<unsigned int> V4L2Subdevice::enumPadCodes(unsigned int pad)
if (ret < 0 && ret != -EINVAL) {
LOG(V4L2, Error)
- << "Unable to enumerate formats on pad " << pad
+ << "Unable to enumerate formats on pad " << stream
<< ": " << strerror(-ret);
return {};
}
@@ -581,7 +1576,7 @@ std::vector<unsigned int> V4L2Subdevice::enumPadCodes(unsigned int pad)
return codes;
}
-std::vector<SizeRange> V4L2Subdevice::enumPadSizes(unsigned int pad,
+std::vector<SizeRange> V4L2Subdevice::enumPadSizes(const Stream &stream,
unsigned int code)
{
std::vector<SizeRange> sizes;
@@ -590,7 +1585,8 @@ std::vector<SizeRange> V4L2Subdevice::enumPadSizes(unsigned int pad,
for (unsigned int index = 0;; index++) {
struct v4l2_subdev_frame_size_enum sizeEnum = {};
sizeEnum.index = index;
- sizeEnum.pad = pad;
+ sizeEnum.pad = stream.pad;
+ sizeEnum.stream = stream.stream;
sizeEnum.code = code;
sizeEnum.which = V4L2_SUBDEV_FORMAT_ACTIVE;
@@ -604,7 +1600,7 @@ std::vector<SizeRange> V4L2Subdevice::enumPadSizes(unsigned int pad,
if (ret < 0 && ret != -EINVAL && ret != -ENOTTY) {
LOG(V4L2, Error)
- << "Unable to enumerate sizes on pad " << pad
+ << "Unable to enumerate sizes on pad " << stream
<< ": " << strerror(-ret);
return {};
}
diff --git a/src/libcamera/v4l2_videodevice.cpp b/src/libcamera/v4l2_videodevice.cpp
index 63911339..4947aa3d 100644
--- a/src/libcamera/v4l2_videodevice.cpp
+++ b/src/libcamera/v4l2_videodevice.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_videodevice.cpp - V4L2 Video Device
+ * V4L2 Video Device
*/
#include "libcamera/internal/v4l2_videodevice.h"
@@ -633,13 +633,9 @@ int V4L2VideoDevice::open()
<< "Opened device " << caps_.bus_info() << ": "
<< caps_.driver() << ": " << caps_.card();
- ret = getFormat(&format_);
- if (ret) {
- LOG(V4L2, Error) << "Failed to get format";
+ ret = initFormats();
+ if (ret)
return ret;
- }
-
- formatInfo_ = &PixelFormatInfo::info(format_.fourcc);
return 0;
}
@@ -726,7 +722,24 @@ int V4L2VideoDevice::open(SharedFD handle, enum v4l2_buf_type type)
<< "Opened device " << caps_.bus_info() << ": "
<< caps_.driver() << ": " << caps_.card();
- ret = getFormat(&format_);
+ ret = initFormats();
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int V4L2VideoDevice::initFormats()
+{
+ const std::vector<V4L2PixelFormat> &deviceFormats = enumPixelformats(0);
+ if (deviceFormats.empty()) {
+ LOG(V4L2, Error) << "Failed to initialize device formats";
+ return -EINVAL;
+ }
+
+ pixelFormats_ = { deviceFormats.begin(), deviceFormats.end() };
+
+ int ret = getFormat(&format_);
if (ret) {
LOG(V4L2, Error) << "Failed to get format";
return ret;
@@ -901,6 +914,13 @@ int V4L2VideoDevice::trySetFormatMeta(V4L2DeviceFormat *format, bool set)
return 0;
}
+template<typename T>
+std::optional<ColorSpace> V4L2VideoDevice::toColorSpace(const T &v4l2Format)
+{
+ V4L2PixelFormat fourcc{ v4l2Format.pixelformat };
+ return V4L2Device::toColorSpace(v4l2Format, PixelFormatInfo::info(fourcc).colourEncoding);
+}
+
int V4L2VideoDevice::getFormatMultiplane(V4L2DeviceFormat *format)
{
struct v4l2_format v4l2Format = {};
@@ -940,7 +960,12 @@ int V4L2VideoDevice::trySetFormatMultiplane(V4L2DeviceFormat *format, bool set)
pix->pixelformat = format->fourcc;
pix->num_planes = format->planesCount;
pix->field = V4L2_FIELD_NONE;
- fromColorSpace(format->colorSpace, *pix);
+ if (format->colorSpace) {
+ fromColorSpace(format->colorSpace, *pix);
+
+ if (caps_.isVideoCapture())
+ pix->flags |= V4L2_PIX_FMT_FLAG_SET_CSC;
+ }
ASSERT(pix->num_planes <= std::size(pix->plane_fmt));
@@ -1010,7 +1035,12 @@ int V4L2VideoDevice::trySetFormatSingleplane(V4L2DeviceFormat *format, bool set)
pix->pixelformat = format->fourcc;
pix->bytesperline = format->planes[0].bpl;
pix->field = V4L2_FIELD_NONE;
- fromColorSpace(format->colorSpace, *pix);
+ if (format->colorSpace) {
+ fromColorSpace(format->colorSpace, *pix);
+
+ if (caps_.isVideoCapture())
+ pix->flags |= V4L2_PIX_FMT_FLAG_SET_CSC;
+ }
ret = ioctl(set ? VIDIOC_S_FMT : VIDIOC_TRY_FMT, &v4l2Format);
if (ret) {
@@ -1441,7 +1471,7 @@ UniqueFD V4L2VideoDevice::exportDmabufFd(unsigned int index,
expbuf.type = bufferType_;
expbuf.index = index;
expbuf.plane = plane;
- expbuf.flags = O_RDWR;
+ expbuf.flags = O_CLOEXEC | O_RDWR;
ret = ioctl(VIDIOC_EXPBUF, &expbuf);
if (ret < 0) {
@@ -1503,6 +1533,9 @@ int V4L2VideoDevice::importBuffers(unsigned int count)
*/
int V4L2VideoDevice::releaseBuffers()
{
+ if (!cache_)
+ return 0;
+
LOG(V4L2, Debug) << "Releasing buffers";
delete cache_;
@@ -1593,6 +1626,11 @@ int V4L2VideoDevice::queueBuffer(FrameBuffer *buffer)
if (V4L2_TYPE_IS_OUTPUT(buf.type)) {
const FrameMetadata &metadata = buffer->metadata();
+ for (const auto &plane : metadata.planes()) {
+ if (!plane.bytesused)
+ LOG(V4L2, Warning) << "byteused == 0 is deprecated";
+ }
+
if (numV4l2Planes != planes.size()) {
/*
* If we have a multi-planar buffer with a V4L2
@@ -1761,12 +1799,14 @@ FrameBuffer *V4L2VideoDevice::dequeueBuffer()
watchdog_.start(std::chrono::duration_cast<std::chrono::milliseconds>(watchdogDuration_));
}
- buffer->metadata_.status = buf.flags & V4L2_BUF_FLAG_ERROR
- ? FrameMetadata::FrameError
- : FrameMetadata::FrameSuccess;
- buffer->metadata_.sequence = buf.sequence;
- buffer->metadata_.timestamp = buf.timestamp.tv_sec * 1000000000ULL
- + buf.timestamp.tv_usec * 1000ULL;
+ FrameMetadata &metadata = buffer->_d()->metadata();
+
+ metadata.status = buf.flags & V4L2_BUF_FLAG_ERROR
+ ? FrameMetadata::FrameError
+ : FrameMetadata::FrameSuccess;
+ metadata.sequence = buf.sequence;
+ metadata.timestamp = buf.timestamp.tv_sec * 1000000000ULL
+ + buf.timestamp.tv_usec * 1000ULL;
if (V4L2_TYPE_IS_OUTPUT(buf.type))
return buffer;
@@ -1777,15 +1817,14 @@ FrameBuffer *V4L2VideoDevice::dequeueBuffer()
*/
if (!firstFrame_) {
if (buf.sequence)
- LOG(V4L2, Warning)
+ LOG(V4L2, Info)
<< "Zero sequence expected for first frame (got "
<< buf.sequence << ")";
firstFrame_ = buf.sequence;
}
- buffer->metadata_.sequence -= firstFrame_.value();
+ metadata.sequence -= firstFrame_.value();
unsigned int numV4l2Planes = multiPlanar ? buf.length : 1;
- FrameMetadata &metadata = buffer->metadata_;
if (numV4l2Planes != buffer->planes().size()) {
/*
@@ -1911,9 +1950,10 @@ int V4L2VideoDevice::streamOff()
/* Send back all queued buffers. */
for (auto it : queuedBuffers_) {
FrameBuffer *buffer = it.second;
+ FrameMetadata &metadata = buffer->_d()->metadata();
cache_->put(it.first);
- buffer->metadata_.status = FrameMetadata::FrameCancelled;
+ metadata.status = FrameMetadata::FrameCancelled;
bufferReady.emit(buffer);
}
@@ -1990,6 +2030,40 @@ V4L2VideoDevice::fromEntityName(const MediaDevice *media,
}
/**
+ * \brief Convert \a PixelFormat to a V4L2PixelFormat supported by the device
+ * \param[in] pixelFormat The PixelFormat to convert
+ *
+ * Convert \a pixelFormat to a V4L2 FourCC that is known to be supported by
+ * the video device.
+ *
+ * A V4L2VideoDevice may support different V4L2 pixel formats that map the same
+ * PixelFormat. This is the case for the contiguous and non-contiguous variants
+ * of multiplanar formats, and for the V4L2 MJPEG and JPEG pixel formats.
+ * Converting a PixelFormat to a V4L2PixelFormat may thus have multiple answers.
+ *
+ * This function converts the \a pixelFormat using the list of V4L2 pixel
+ * formats that the V4L2VideoDevice supports. This guarantees that the returned
+ * V4L2PixelFormat will be valid for the device. If multiple matches are still
+ * possible, contiguous variants are preferred. If the \a pixelFormat is not
+ * supported by the device, the function returns an invalid V4L2PixelFormat.
+ *
+ * \return The V4L2PixelFormat corresponding to \a pixelFormat if supported by
+ * the device, or an invalid V4L2PixelFormat otherwise
+ */
+V4L2PixelFormat V4L2VideoDevice::toV4L2PixelFormat(const PixelFormat &pixelFormat) const
+{
+ const std::vector<V4L2PixelFormat> &v4l2PixelFormats =
+ V4L2PixelFormat::fromPixelFormat(pixelFormat);
+
+ for (const V4L2PixelFormat &v4l2Format : v4l2PixelFormats) {
+ if (pixelFormats_.count(v4l2Format))
+ return v4l2Format;
+ }
+
+ return {};
+}
+
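A sketch of the intended use, assuming video points to an open V4L2VideoDevice
and using formats::NV12 purely as an example PixelFormat:

        V4L2PixelFormat v4l2Format = video->toV4L2PixelFormat(formats::NV12);
        if (!v4l2Format.isValid())
                LOG(V4L2, Error) << "NV12 is not supported by the device";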
+/**
* \class V4L2M2MDevice
* \brief Memory-to-Memory video device
*
diff --git a/src/libcamera/version.cpp.in b/src/libcamera/version.cpp.in
index 5aec08a1..bf5a2c30 100644
--- a/src/libcamera/version.cpp.in
+++ b/src/libcamera/version.cpp.in
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * version.cpp - libcamera version
+ * libcamera version
*
* This file is auto-generated. Do not edit.
*/
diff --git a/src/libcamera/yaml_parser.cpp b/src/libcamera/yaml_parser.cpp
index 5c45e44e..55f81916 100644
--- a/src/libcamera/yaml_parser.cpp
+++ b/src/libcamera/yaml_parser.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022, Google Inc.
*
- * yaml_parser.cpp - libcamera YAML parsing helper
+ * libcamera YAML parsing helper
*/
#include "libcamera/internal/yaml_parser.h"
@@ -31,12 +31,6 @@ namespace {
/* Empty static YamlObject as a safe result for invalid operations */
static const YamlObject empty;
-void setOk(bool *ok, bool result)
-{
- if (ok)
- *ok = result;
-}
-
} /* namespace */
/**
@@ -91,7 +85,6 @@ std::size_t YamlObject::size() const
{
switch (type_) {
case Type::Dictionary:
- return dictionary_.size();
case Type::List:
return list_.size();
default:
@@ -100,232 +93,293 @@ std::size_t YamlObject::size() const
}
/**
- * \fn template<typename T> YamlObject::get<T>(
- * const T &defaultValue, bool *ok) const
+ * \fn template<typename T> YamlObject::get<T>() const
* \brief Parse the YamlObject as a \a T value
- * \param[in] defaultValue The default value when failing to parse
- * \param[out] ok The result of whether the parse succeeded
*
* This function parses the value of the YamlObject as a \a T object, and
* returns the value. If parsing fails (usually because the YamlObject doesn't
- * store a \a T value), the \a defaultValue is returned, and \a ok is set to
- * false. Otherwise, the YamlObject value is returned, and \a ok is set to true.
+ * store a \a T value), std::nullopt is returned.
+ *
+ * \return The YamlObject value, or std::nullopt if parsing failed
+ */
+
+/**
+ * \fn template<typename T> YamlObject::get<T>(const T &defaultValue) const
+ * \brief Parse the YamlObject as a \a T value
+ * \param[in] defaultValue The default value when failing to parse
*
- * The \a ok pointer is optional and can be a nullptr if the caller doesn't
- * need to know if parsing succeeded.
+ * This function parses the value of the YamlObject as a \a T object, and
+ * returns the value. If parsing fails (usually because the YamlObject doesn't
+ * store a \a T value), the \a defaultValue is returned.
*
- * \return Value as a bool type
+ * \return The YamlObject value, or \a defaultValue if parsing failed
*/
#ifndef __DOXYGEN__
template<>
-bool YamlObject::get(const bool &defaultValue, bool *ok) const
+std::optional<bool> YamlObject::get() const
{
- setOk(ok, false);
-
if (type_ != Type::Value)
- return defaultValue;
+ return std::nullopt;
- if (value_ == "true") {
- setOk(ok, true);
+ if (value_ == "true")
return true;
- } else if (value_ == "false") {
- setOk(ok, true);
+ else if (value_ == "false")
return false;
- }
- return defaultValue;
+ return std::nullopt;
}
-template<>
-int16_t YamlObject::get(const int16_t &defaultValue, bool *ok) const
-{
- setOk(ok, false);
-
- if (type_ != Type::Value)
- return defaultValue;
+namespace {
- if (value_ == "")
- return defaultValue;
+bool parseSignedInteger(const std::string &str, long min, long max,
+ long *result)
+{
+ if (str == "")
+ return false;
char *end;
errno = 0;
- int16_t value = std::strtol(value_.c_str(), &end, 10);
+ long value = std::strtol(str.c_str(), &end, 10);
- if ('\0' != *end || errno == ERANGE ||
- value < std::numeric_limits<int16_t>::min() ||
- value > std::numeric_limits<int16_t>::max())
- return defaultValue;
+ if ('\0' != *end || errno == ERANGE || value < min || value > max)
+ return false;
- setOk(ok, true);
- return value;
+ *result = value;
+ return true;
}
-template<>
-uint16_t YamlObject::get(const uint16_t &defaultValue, bool *ok) const
+bool parseUnsignedInteger(const std::string &str, unsigned long max,
+ unsigned long *result)
{
- setOk(ok, false);
-
- if (type_ != Type::Value)
- return defaultValue;
-
- if (value_ == "")
- return defaultValue;
+ if (str == "")
+ return false;
/*
- * libyaml parses all scalar values as strings. When a string has
- * leading spaces before a minus sign, for example " -10", strtoul
- * skips leading spaces, accepts the leading minus sign, and the
- * calculated digits are negated as if by unary minus. Rule it out in
- * case the user gets a large number when the value is negative.
+ * strtoul() accepts strings representing a negative number, in which
+ * case it negates the converted value. We don't want to silently accept
+ * negative values and return a large positive number, so check for a
+ * minus sign (after optional whitespace) and return an error.
*/
- std::size_t found = value_.find_first_not_of(" \t");
- if (found != std::string::npos && value_[found] == '-')
- return defaultValue;
+ std::size_t found = str.find_first_not_of(" \t");
+ if (found != std::string::npos && str[found] == '-')
+ return false;
char *end;
errno = 0;
- uint16_t value = std::strtoul(value_.c_str(), &end, 10);
+ unsigned long value = std::strtoul(str.c_str(), &end, 10);
- if ('\0' != *end || errno == ERANGE ||
- value < std::numeric_limits<uint16_t>::min() ||
- value > std::numeric_limits<uint16_t>::max())
- return defaultValue;
+ if ('\0' != *end || errno == ERANGE || value > max)
+ return false;
- setOk(ok, true);
- return value;
+ *result = value;
+ return true;
}
+} /* namespace */
+
template<>
-int32_t YamlObject::get(const int32_t &defaultValue, bool *ok) const
+std::optional<int8_t> YamlObject::get() const
{
- setOk(ok, false);
-
if (type_ != Type::Value)
- return defaultValue;
+ return std::nullopt;
- if (value_ == "")
- return defaultValue;
+ long value;
- char *end;
+ if (!parseSignedInteger(value_, std::numeric_limits<int8_t>::min(),
+ std::numeric_limits<int8_t>::max(), &value))
+ return std::nullopt;
- errno = 0;
- long value = std::strtol(value_.c_str(), &end, 10);
+ return value;
+}
+
+template<>
+std::optional<uint8_t> YamlObject::get() const
+{
+ if (type_ != Type::Value)
+ return std::nullopt;
- if ('\0' != *end || errno == ERANGE ||
- value < std::numeric_limits<int32_t>::min() ||
- value > std::numeric_limits<int32_t>::max())
- return defaultValue;
+ unsigned long value;
+
+ if (!parseUnsignedInteger(value_, std::numeric_limits<uint8_t>::max(),
+ &value))
+ return std::nullopt;
- setOk(ok, true);
return value;
}
template<>
-uint32_t YamlObject::get(const uint32_t &defaultValue, bool *ok) const
+std::optional<int16_t> YamlObject::get() const
{
- setOk(ok, false);
+ if (type_ != Type::Value)
+ return std::nullopt;
+
+ long value;
+
+ if (!parseSignedInteger(value_, std::numeric_limits<int16_t>::min(),
+ std::numeric_limits<int16_t>::max(), &value))
+ return std::nullopt;
+
+ return value;
+}
+template<>
+std::optional<uint16_t> YamlObject::get() const
+{
if (type_ != Type::Value)
- return defaultValue;
+ return std::nullopt;
- if (value_ == "")
- return defaultValue;
+ unsigned long value;
- /*
- * libyaml parses all scalar values as strings. When a string has
- * leading spaces before a minus sign, for example " -10", strtoul
- * skips leading spaces, accepts the leading minus sign, and the
- * calculated digits are negated as if by unary minus. Rule it out in
- * case the user gets a large number when the value is negative.
- */
- std::size_t found = value_.find_first_not_of(" \t");
- if (found != std::string::npos && value_[found] == '-')
- return defaultValue;
+ if (!parseUnsignedInteger(value_, std::numeric_limits<uint16_t>::max(),
+ &value))
+ return std::nullopt;
- char *end;
+ return value;
+}
- errno = 0;
- unsigned long value = std::strtoul(value_.c_str(), &end, 10);
+template<>
+std::optional<int32_t> YamlObject::get() const
+{
+ if (type_ != Type::Value)
+ return std::nullopt;
+
+ long value;
- if ('\0' != *end || errno == ERANGE ||
- value < std::numeric_limits<uint32_t>::min() ||
- value > std::numeric_limits<uint32_t>::max())
- return defaultValue;
+ if (!parseSignedInteger(value_, std::numeric_limits<int32_t>::min(),
+ std::numeric_limits<int32_t>::max(), &value))
+ return std::nullopt;
- setOk(ok, true);
return value;
}
template<>
-double YamlObject::get(const double &defaultValue, bool *ok) const
+std::optional<uint32_t> YamlObject::get() const
{
- setOk(ok, false);
+ if (type_ != Type::Value)
+ return std::nullopt;
+
+ unsigned long value;
+ if (!parseUnsignedInteger(value_, std::numeric_limits<uint32_t>::max(),
+ &value))
+ return std::nullopt;
+
+ return value;
+}
+
+template<>
+std::optional<double> YamlObject::get() const
+{
if (type_ != Type::Value)
- return defaultValue;
+ return std::nullopt;
if (value_ == "")
- return defaultValue;
+ return std::nullopt;
char *end;
errno = 0;
- double value = std::strtod(value_.c_str(), &end);
+ double value = utils::strtod(value_.c_str(), &end);
if ('\0' != *end || errno == ERANGE)
- return defaultValue;
+ return std::nullopt;
- setOk(ok, true);
return value;
}
template<>
-std::string YamlObject::get(const std::string &defaultValue, bool *ok) const
+std::optional<std::string> YamlObject::get() const
{
- setOk(ok, false);
-
if (type_ != Type::Value)
- return defaultValue;
+ return std::nullopt;
- setOk(ok, true);
return value_;
}
template<>
-Size YamlObject::get(const Size &defaultValue, bool *ok) const
+std::optional<Size> YamlObject::get() const
{
- setOk(ok, false);
-
if (type_ != Type::List)
- return defaultValue;
+ return std::nullopt;
if (list_.size() != 2)
- return defaultValue;
+ return std::nullopt;
- /*
- * Add a local variable to validate each dimension in case
- * that ok == nullptr.
- */
- bool valid;
- uint32_t width = list_[0]->get<uint32_t>(0, &valid);
- if (!valid)
- return defaultValue;
+ auto width = list_[0].value->get<uint32_t>();
+ if (!width)
+ return std::nullopt;
- uint32_t height = list_[1]->get<uint32_t>(0, &valid);
- if (!valid)
- return defaultValue;
+ auto height = list_[1].value->get<uint32_t>();
+ if (!height)
+ return std::nullopt;
- setOk(ok, true);
- return Size(width, height);
+ return Size(*width, *height);
}
#endif /* __DOXYGEN__ */
/**
+ * \fn template<typename T> YamlObject::getList<T>() const
+ * \brief Parse the YamlObject as a list of \a T
+ *
+ * This function parses the value of the YamlObject as a list of \a T objects,
+ * and returns the value as a \a std::vector<T>. If parsing fails, std::nullopt
+ * is returned.
+ *
+ * \return The YamlObject value as a std::vector<T>, or std::nullopt if parsing
+ * failed
+ */
+
+#ifndef __DOXYGEN__
+
+template<typename T,
+ std::enable_if_t<
+ std::is_same_v<bool, T> ||
+ std::is_same_v<double, T> ||
+ std::is_same_v<int8_t, T> ||
+ std::is_same_v<uint8_t, T> ||
+ std::is_same_v<int16_t, T> ||
+ std::is_same_v<uint16_t, T> ||
+ std::is_same_v<int32_t, T> ||
+ std::is_same_v<uint32_t, T> ||
+ std::is_same_v<std::string, T> ||
+ std::is_same_v<Size, T>> *>
+std::optional<std::vector<T>> YamlObject::getList() const
+{
+ if (type_ != Type::List)
+ return std::nullopt;
+
+ std::vector<T> values;
+ values.reserve(list_.size());
+
+ for (const YamlObject &entry : asList()) {
+ const auto value = entry.get<T>();
+ if (!value)
+ return std::nullopt;
+ values.emplace_back(*value);
+ }
+
+ return values;
+}
+
+template std::optional<std::vector<bool>> YamlObject::getList<bool>() const;
+template std::optional<std::vector<double>> YamlObject::getList<double>() const;
+template std::optional<std::vector<int8_t>> YamlObject::getList<int8_t>() const;
+template std::optional<std::vector<uint8_t>> YamlObject::getList<uint8_t>() const;
+template std::optional<std::vector<int16_t>> YamlObject::getList<int16_t>() const;
+template std::optional<std::vector<uint16_t>> YamlObject::getList<uint16_t>() const;
+template std::optional<std::vector<int32_t>> YamlObject::getList<int32_t>() const;
+template std::optional<std::vector<uint32_t>> YamlObject::getList<uint32_t>() const;
+template std::optional<std::vector<std::string>> YamlObject::getList<std::string>() const;
+template std::optional<std::vector<Size>> YamlObject::getList<Size>() const;
+
+#endif /* __DOXYGEN__ */
+
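As an illustration of the new std::optional based getters (the node and key
names below are hypothetical), a caller can combine get<T>(), the defaulted
overload and getList<T>():

        std::optional<uint32_t> width = node["width"].get<uint32_t>();
        double gain = node["gain"].get<double>(1.0);
        std::optional<std::vector<uint16_t>> table =
                node["gamma"].getList<uint16_t>();

        if (!width || !table)
                LOG(YamlParser, Warning) << "Missing or malformed entries";
        else
                LOG(YamlParser, Debug) << "width " << *width << ", gain "
                                       << gain << ", " << table->size()
                                       << " gamma entries";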
+/**
* \fn YamlObject::asDict() const
* \brief Wrap a dictionary YamlObject in an adapter that exposes iterators
*
@@ -379,7 +433,7 @@ const YamlObject &YamlObject::operator[](std::size_t index) const
if (type_ != Type::List || index >= size())
return empty;
- return *list_[index];
+ return *list_[index].value;
}
/**
@@ -395,7 +449,7 @@ const YamlObject &YamlObject::operator[](std::size_t index) const
*/
bool YamlObject::contains(const std::string &key) const
{
- if (dictionary_.find(key) == dictionary_.end())
+ if (dictionary_.find(std::ref(key)) == dictionary_.end())
return false;
return true;
@@ -622,7 +676,7 @@ int YamlParserContext::parseDictionaryOrList(YamlObject::Type type,
* Add a safety counter to make sure we don't loop indefinitely in case
* the YAML file is malformed.
*/
- for (unsigned int sentinel = 1000; sentinel; sentinel--) {
+ for (unsigned int sentinel = 2000; sentinel; sentinel--) {
auto evt = nextEvent();
if (!evt)
return -EINVAL;
@@ -667,16 +721,16 @@ int YamlParserContext::parseNextYamlObject(YamlObject &yamlObject, EventPtr even
yamlObject.type_ = YamlObject::Type::List;
auto &list = yamlObject.list_;
auto handler = [this, &list](EventPtr evt) {
- list.emplace_back(new YamlObject());
- return parseNextYamlObject(*list.back(), std::move(evt));
+ list.emplace_back(std::string{}, std::make_unique<YamlObject>());
+ return parseNextYamlObject(*list.back().value, std::move(evt));
};
return parseDictionaryOrList(YamlObject::Type::List, handler);
}
case YAML_MAPPING_START_EVENT: {
yamlObject.type_ = YamlObject::Type::Dictionary;
- auto &dictionary = yamlObject.dictionary_;
- auto handler = [this, &dictionary](EventPtr evtKey) {
+ auto &list = yamlObject.list_;
+ auto handler = [this, &list](EventPtr evtKey) {
/* Parse key */
if (evtKey->type != YAML_SCALAR_EVENT) {
LOG(YamlParser, Error) << "Expect key at line: "
@@ -694,10 +748,19 @@ int YamlParserContext::parseNextYamlObject(YamlObject &yamlObject, EventPtr even
if (!evtValue)
return -EINVAL;
- auto elem = dictionary.emplace(key, std::make_unique<YamlObject>());
- return parseNextYamlObject(*elem.first->second.get(), std::move(evtValue));
+ auto &elem = list.emplace_back(std::move(key),
+ std::make_unique<YamlObject>());
+ return parseNextYamlObject(*elem.value, std::move(evtValue));
};
- return parseDictionaryOrList(YamlObject::Type::Dictionary, handler);
+ int ret = parseDictionaryOrList(YamlObject::Type::Dictionary, handler);
+ if (ret)
+ return ret;
+
+ auto &dictionary = yamlObject.dictionary_;
+ for (const auto &elem : list)
+ dictionary.emplace(elem.key, elem.value.get());
+
+ return 0;
}
default:
@@ -753,6 +816,9 @@ int YamlParserContext::parseNextYamlObject(YamlObject &yamlObject, EventPtr even
* The YamlParser::parse() function takes an open FILE, parses its contents, and
* returns a pointer to a YamlObject corresponding to the root node of the YAML
* document.
+ *
+ * The parser preserves the order of items in the YAML file, for both lists and
+ * dictionaries.
*/
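A brief sketch of what the preserved ordering implies for iteration, where root
is the object returned by YamlParser::parse():

        /* Keys are visited in the order they appear in the YAML file. */
        for (const auto &[key, value] : root->asDict())
                LOG(YamlParser, Debug) << key << " -> "
                                       << value.get<std::string>("<non-scalar>");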
/**
@@ -775,7 +841,9 @@ std::unique_ptr<YamlObject> YamlParser::parse(File &file)
std::unique_ptr<YamlObject> root(new YamlObject());
if (context.parseContent(*root)) {
- LOG(YamlParser, Error) << "Failed to parse YAML content";
+ LOG(YamlParser, Error)
+ << "Failed to parse YAML content from "
+ << file.fileName();
return nullptr;
}
diff --git a/src/meson.build b/src/meson.build
index 34663a6f..165a77bb 100644
--- a/src/meson.build
+++ b/src/meson.build
@@ -3,6 +3,7 @@
# Cache system paths
libcamera_datadir = get_option('datadir') / 'libcamera'
libcamera_libdir = get_option('libdir') / 'libcamera'
+libcamera_libexecdir = get_option('libexecdir') / 'libcamera'
libcamera_sysconfdir = get_option('sysconfdir') / 'libcamera'
config_h.set('LIBCAMERA_DATA_DIR', '"' + get_option('prefix') / libcamera_datadir + '"')
@@ -14,7 +15,7 @@ summary({
}, section : 'Paths')
# Module Signing
-openssl = find_program('openssl', required : true)
+openssl = find_program('openssl', required : false)
if openssl.found()
ipa_priv_key = custom_target('ipa-priv-key',
output : ['ipa-priv-key.pem'],
@@ -22,6 +23,7 @@ if openssl.found()
config_h.set('HAVE_IPA_PUBKEY', 1)
ipa_sign_module = true
else
+ warning('openssl not found, all IPA modules will be isolated')
ipa_sign_module = false
endif
@@ -31,10 +33,7 @@ subdir('libcamera')
subdir('android')
subdir('ipa')
-subdir('lc-compliance')
-
-subdir('cam')
-subdir('qcam')
+subdir('apps')
subdir('gstreamer')
subdir('py')
diff --git a/src/py/cam/cam.py b/src/py/cam/cam.py
index 2ae89fa8..ff4b7f66 100755
--- a/src/py/cam/cam.py
+++ b/src/py/cam/cam.py
@@ -3,9 +3,6 @@
# SPDX-License-Identifier: GPL-2.0-or-later
# Copyright (C) 2022, Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
-# \todo Convert ctx and state dicts to proper classes, and move relevant
-# functions to those classes.
-
from typing import Any
import argparse
import binascii
@@ -26,6 +23,7 @@ class CameraContext:
opt_metadata: bool
opt_save_frames: bool
opt_capture: int
+ opt_orientation: str
stream_names: dict[libcam.Stream, str]
streams: list[libcam.Stream]
@@ -149,6 +147,21 @@ class CameraContext:
if 'pixelformat' in stream_opts:
stream_config.pixel_format = libcam.PixelFormat(stream_opts['pixelformat'])
+ if self.opt_orientation is not None:
+ orientation_map = {
+ 'rot0': libcam.Orientation.Rotate0,
+ 'rot180': libcam.Orientation.Rotate180,
+ 'mirror': libcam.Orientation.Rotate0Mirror,
+ 'flip': libcam.Orientation.Rotate180Mirror,
+ }
+
+ orient = orientation_map.get(self.opt_orientation, None)
+ if orient is None:
+ print('Bad orientation: ', self.opt_orientation)
+ sys.exit(-1)
+
+ camconfig.orientation = orient
+
stat = camconfig.validate()
if stat == libcam.CameraConfiguration.Status.Invalid:
@@ -161,9 +174,7 @@ class CameraContext:
print('Camera configuration adjusted')
- r = self.camera.configure(camconfig)
- if r != 0:
- raise Exception('Configure failed')
+ self.camera.configure(camconfig)
self.stream_names = {}
self.streams = []
@@ -178,12 +189,7 @@ class CameraContext:
allocator = libcam.FrameBufferAllocator(self.camera)
for stream in self.streams:
- ret = allocator.allocate(stream)
- if ret < 0:
- print('Cannot allocate buffers')
- exit(-1)
-
- allocated = len(allocator.buffers(stream))
+ allocated = allocator.allocate(stream)
print('{}-{}: Allocated {} buffers'.format(self.id, self.stream_names[stream], allocated))
@@ -208,10 +214,7 @@ class CameraContext:
buffers = self.allocator.buffers(stream)
buffer = buffers[buf_num]
- ret = request.add_buffer(stream, buffer)
- if ret < 0:
- print('Can not set buffer for request')
- exit(-1)
+ request.add_buffer(stream, buffer)
requests.append(request)
@@ -269,6 +272,11 @@ class CaptureState:
ctx.last = ts
ctx.fps = fps
+ if ctx.opt_metadata:
+ reqmeta = req.metadata
+ for ctrl, val in reqmeta.items():
+ print(f'\t{ctrl} = {val}')
+
for stream, fb in buffers.items():
stream_name = ctx.stream_names[stream]
@@ -287,11 +295,6 @@ class CaptureState:
'/'.join([str(p.bytes_used) for p in meta.planes]),
crcs))
- if ctx.opt_metadata:
- reqmeta = req.metadata
- for ctrl, val in reqmeta.items():
- print(f'\t{ctrl} = {val}')
-
if ctx.opt_save_frames:
with libcamera.utils.MappedFrameBuffer(fb) as mfb:
filename = 'frame-{}-{}-{}.data'.format(ctx.id, stream_name, ctx.reqs_completed)
@@ -398,6 +401,7 @@ def main():
parser.add_argument('--metadata', nargs=0, type=bool, action=CustomAction, help='Print the metadata for completed requests')
parser.add_argument('--strict-formats', type=bool, nargs=0, action=CustomAction, help='Do not allow requested stream format(s) to be adjusted')
parser.add_argument('-s', '--stream', nargs='+', action=CustomAction)
+ parser.add_argument('-o', '--orientation', help='Desired image orientation (rot0, rot180, mirror, flip)')
args = parser.parse_args()
cm = libcam.CameraManager.singleton()
@@ -421,6 +425,7 @@ def main():
ctx.opt_metadata = args.metadata.get(cam_idx, False)
ctx.opt_strict_formats = args.strict_formats.get(cam_idx, False)
ctx.opt_stream = args.stream.get(cam_idx, ['role=viewfinder'])
+ ctx.opt_orientation = args.orientation
contexts.append(ctx)
for ctx in contexts:
@@ -434,7 +439,10 @@ def main():
if args.info:
ctx.do_cmd_info()
- if args.capture:
+ # Filter out capture contexts which are not marked for capture
+ contexts = [ctx for ctx in contexts if ctx.opt_capture > 0]
+
+ if contexts:
state = CaptureState(cm, contexts)
if args.renderer == 'null':
diff --git a/src/py/cam/helpers.py b/src/py/cam/helpers.py
index 6b32a134..2d906667 100644
--- a/src/py/cam/helpers.py
+++ b/src/py/cam/helpers.py
@@ -117,14 +117,12 @@ def to_rgb(fmt, size, data):
bayer_pattern = fmt[1:5]
bitspp = int(fmt[5:])
- # \todo shifting leaves the lowest bits 0
if bitspp == 8:
data = data.reshape((h, w))
- data = data.astype(np.uint16) << 8
+ data = data.astype(np.uint16)
elif bitspp in [10, 12]:
data = data.view(np.uint16)
data = data.reshape((h, w))
- data = data << (16 - bitspp)
else:
raise Exception('Bad bitspp:' + str(bitspp))
@@ -145,7 +143,7 @@ def to_rgb(fmt, size, data):
b0 = (idx % 2, idx // 2)
rgb = demosaic(data, r0, g0, g1, b0)
- rgb = (rgb >> 8).astype(np.uint8)
+ rgb = (rgb >> (bitspp - 8)).astype(np.uint8)
else:
rgb = None
diff --git a/src/py/examples/simple-cam.py b/src/py/examples/simple-cam.py
index 2b81bb65..1cd1019d 100755
--- a/src/py/examples/simple-cam.py
+++ b/src/py/examples/simple-cam.py
@@ -19,8 +19,9 @@ TIMEOUT_SEC = 3
def handle_camera_event(cm):
- # cm.get_ready_requests() will not block here, as we know there is an event
- # to read.
+ # cm.get_ready_requests() returns the ready requests, which in our case
+    # should almost always contain a single Request, but in some cases there
+ # could be multiple or none.
reqs = cm.get_ready_requests()
@@ -258,12 +259,7 @@ def main():
allocator = libcam.FrameBufferAllocator(camera)
for cfg in config:
- ret = allocator.allocate(cfg.stream)
- if ret < 0:
- print('Can\'t allocate buffers')
- return -1
-
- allocated = len(allocator.buffers(cfg.stream))
+ allocated = allocator.allocate(cfg.stream)
print(f'Allocated {allocated} buffers for stream')
# --------------------------------------------------------------------
@@ -288,15 +284,9 @@ def main():
requests = []
for i in range(len(buffers)):
request = camera.create_request()
- if not request:
- print('Can\'t create request')
- return -1
buffer = buffers[i]
- ret = request.add_buffer(stream, buffer)
- if ret < 0:
- print('Can\'t set buffer for request')
- return -1
+ request.add_buffer(stream, buffer)
# Controls can be added to a request on a per frame basis.
request.set_control(libcam.controls.Brightness, 0.5)
diff --git a/src/py/examples/simple-capture.py b/src/py/examples/simple-capture.py
index a6a9b33e..4b85408f 100755
--- a/src/py/examples/simple-capture.py
+++ b/src/py/examples/simple-capture.py
@@ -14,6 +14,7 @@
import argparse
import libcamera as libcam
+import selectors
import sys
# Number of frames to capture
@@ -42,8 +43,7 @@ def main():
# Acquire the camera for our use
- ret = cam.acquire()
- assert ret == 0
+ cam.acquire()
# Configure the camera
@@ -59,8 +59,7 @@ def main():
w, h = [int(v) for v in args.size.split('x')]
stream_config.size = libcam.Size(w, h)
- ret = cam.configure(cam_config)
- assert ret == 0
+ cam.configure(cam_config)
print(f'Capturing {TOTAL_FRAMES} frames with {stream_config}')
@@ -82,15 +81,13 @@ def main():
req = cam.create_request(i)
buffer = allocator.buffers(stream)[i]
- ret = req.add_buffer(stream, buffer)
- assert ret == 0
+ req.add_buffer(stream, buffer)
reqs.append(req)
# Start the camera
- ret = cam.start()
- assert ret == 0
+ cam.start()
# frames_queued and frames_done track the number of frames queued and done
@@ -100,18 +97,24 @@ def main():
# Queue the requests to the camera
for req in reqs:
- ret = cam.queue_request(req)
- assert ret == 0
+ cam.queue_request(req)
frames_queued += 1
# The main loop. Wait for the queued Requests to complete, process them,
# and re-queue them again.
+ sel = selectors.DefaultSelector()
+ sel.register(cm.event_fd, selectors.EVENT_READ)
+
while frames_done < TOTAL_FRAMES:
- # cm.get_ready_requests() blocks until there is an event and returns
- # all the ready requests. Here we should almost always get a single
+ # cm.get_ready_requests() does not block, so we use a Selector to wait
+ # for a camera event. Here we should almost always get a single
# Request, but in some cases there could be multiple or none.
+ events = sel.select()
+ if not events:
+ continue
+
reqs = cm.get_ready_requests()
for req in reqs:
@@ -147,13 +150,11 @@ def main():
# Stop the camera
- ret = cam.stop()
- assert ret == 0
+ cam.stop()
# Release the camera
- ret = cam.release()
- assert ret == 0
+ cam.release()
return 0
diff --git a/src/py/examples/simple-continuous-capture.py b/src/py/examples/simple-continuous-capture.py
index fe78a2dd..e1cb931e 100755
--- a/src/py/examples/simple-continuous-capture.py
+++ b/src/py/examples/simple-continuous-capture.py
@@ -28,8 +28,7 @@ class CameraCaptureContext:
# Acquire the camera for our use
- ret = cam.acquire()
- assert ret == 0
+ cam.acquire()
# Configure the camera
@@ -37,8 +36,7 @@ class CameraCaptureContext:
stream_config = cam_config.at(0)
- ret = cam.configure(cam_config)
- assert ret == 0
+ cam.configure(cam_config)
stream = stream_config.stream
@@ -62,8 +60,7 @@ class CameraCaptureContext:
req = cam.create_request(idx)
buffer = allocator.buffers(stream)[i]
- ret = req.add_buffer(stream, buffer)
- assert ret == 0
+ req.add_buffer(stream, buffer)
self.reqs.append(req)
@@ -73,13 +70,11 @@ class CameraCaptureContext:
def uninit_camera(self):
# Stop the camera
- ret = self.cam.stop()
- assert ret == 0
+ self.cam.stop()
# Release the camera
- ret = self.cam.release()
- assert ret == 0
+ self.cam.release()
# A container class for our state
@@ -88,8 +83,9 @@ class CaptureContext:
camera_contexts: list[CameraCaptureContext] = []
def handle_camera_event(self):
- # cm.get_ready_requests() will not block here, as we know there is an event
- # to read.
+ # cm.get_ready_requests() returns the ready requests, which in our case
+        # should almost always contain a single Request, but in some cases there
+ # could be multiple or none.
reqs = self.cm.get_ready_requests()
@@ -144,8 +140,7 @@ class CaptureContext:
for cam_ctx in self.camera_contexts:
for req in cam_ctx.reqs:
- ret = cam_ctx.cam.queue_request(req)
- assert ret == 0
+ cam_ctx.cam.queue_request(req)
# Use Selector to wait for events from the camera and from the keyboard
@@ -176,8 +171,7 @@ def main():
# Start the cameras
for cam_ctx in ctx.camera_contexts:
- ret = cam_ctx.cam.start()
- assert ret == 0
+ cam_ctx.cam.start()
ctx.capture()
diff --git a/src/py/libcamera/gen-py-controls.py b/src/py/libcamera/gen-py-controls.py
index 99f3bbcf..8efbf95b 100755
--- a/src/py/libcamera/gen-py-controls.py
+++ b/src/py/libcamera/gen-py-controls.py
@@ -24,48 +24,57 @@ def find_common_prefix(strings):
def generate_py(controls, mode):
out = ''
- for ctrl in controls:
- name, ctrl = ctrl.popitem()
-
- if ctrl.get('draft'):
- ns = 'libcamera::{}::draft::'.format(mode)
- container = 'draft'
- else:
- ns = 'libcamera::{}::'.format(mode)
- container = 'controls'
+ vendors_class_def = []
+ vendor_defs = []
+ vendors = []
+ for vendor, ctrl_list in controls.items():
+ for ctrls in ctrl_list:
+ name, ctrl = ctrls.popitem()
+
+ if vendor not in vendors and vendor != 'libcamera':
+ vendor_mode_str = f'{vendor.capitalize()}{mode.capitalize()}'
+ vendors_class_def.append('class Py{}\n{{\n}};\n'.format(vendor_mode_str))
+ vendor_defs.append('\tauto {} = py::class_<Py{}>(controls, \"{}\");'.format(vendor, vendor_mode_str, vendor))
+ vendors.append(vendor)
+
+ if vendor != 'libcamera':
+ ns = 'libcamera::{}::{}::'.format(mode, vendor)
+ container = vendor
+ else:
+ ns = 'libcamera::{}::'.format(mode)
+ container = 'controls'
- out += f'\t{container}.def_readonly_static("{name}", static_cast<const libcamera::ControlId *>(&{ns}{name}));\n\n'
+ out += f'\t{container}.def_readonly_static("{name}", static_cast<const libcamera::ControlId *>(&{ns}{name}));\n\n'
- enum = ctrl.get('enum')
- if not enum:
- continue
+ enum = ctrl.get('enum')
+ if not enum:
+ continue
- cpp_enum = name + 'Enum'
+ cpp_enum = name + 'Enum'
- out += '\tpy::enum_<{}{}>({}, \"{}\")\n'.format(ns, cpp_enum, container, cpp_enum)
+ out += '\tpy::enum_<{}{}>({}, \"{}\")\n'.format(ns, cpp_enum, container, cpp_enum)
- if mode == 'controls':
- # Adjustments for controls
- if name == 'LensShadingMapMode':
- prefix = 'LensShadingMapMode'
- elif name == 'SceneFlicker':
- # If we strip the prefix, we would get '50Hz', which is illegal name
- prefix = ''
+ if mode == 'controls':
+ # Adjustments for controls
+ if name == 'LensShadingMapMode':
+ prefix = 'LensShadingMapMode'
+ else:
+ prefix = find_common_prefix([e['name'] for e in enum])
else:
+ # Adjustments for properties
prefix = find_common_prefix([e['name'] for e in enum])
- else:
- # Adjustments for properties
- prefix = find_common_prefix([e['name'] for e in enum])
- for entry in enum:
- cpp_enum = entry['name']
- py_enum = entry['name'][len(prefix):]
+ for entry in enum:
+ cpp_enum = entry['name']
+ py_enum = entry['name'][len(prefix):]
- out += '\t\t.value(\"{}\", {}{})\n'.format(py_enum, ns, cpp_enum)
+ out += '\t\t.value(\"{}\", {}{})\n'.format(py_enum, ns, cpp_enum)
- out += '\t;\n\n'
+ out += '\t;\n\n'
- return {'controls': out}
+ return {'controls': out,
+ 'vendors_class_def': '\n'.join(vendors_class_def),
+ 'vendors_defs': '\n'.join(vendor_defs)}
def fill_template(template, data):
@@ -78,22 +87,25 @@ def fill_template(template, data):
def main(argv):
# Parse command line arguments
parser = argparse.ArgumentParser()
- parser.add_argument('-o', dest='output', metavar='file', type=str,
+ parser.add_argument('--mode', '-m', type=str, required=True,
+ help='Mode is either "controls" or "properties"')
+ parser.add_argument('--output', '-o', metavar='file', type=str,
help='Output file name. Defaults to standard output if not specified.')
- parser.add_argument('input', type=str,
- help='Input file name.')
- parser.add_argument('template', type=str,
+ parser.add_argument('--template', '-t', type=str, required=True,
help='Template file name.')
- parser.add_argument('--mode', type=str, required=True,
- help='Mode is either "controls" or "properties"')
+ parser.add_argument('input', type=str, nargs='+',
+ help='Input file name.')
args = parser.parse_args(argv[1:])
if args.mode not in ['controls', 'properties']:
print(f'Invalid mode option "{args.mode}"', file=sys.stderr)
return -1
- data = open(args.input, 'rb').read()
- controls = yaml.safe_load(data)['controls']
+ controls = {}
+ for input in args.input:
+ data = open(input, 'rb').read()
+ vendor = yaml.safe_load(data)['vendor']
+ controls[vendor] = yaml.safe_load(data)['controls']
data = generate_py(controls, args.mode)
diff --git a/src/py/libcamera/meson.build b/src/py/libcamera/meson.build
index eb884538..4807ca7d 100644
--- a/src/py/libcamera/meson.build
+++ b/src/py/libcamera/meson.build
@@ -7,42 +7,56 @@ if not py3_dep.found()
subdir_done()
endif
-pycamera_enabled = true
+pybind11_dep = dependency('pybind11', required : get_option('pycamera'))
+
+if not pybind11_dep.found()
+ pycamera_enabled = false
+ subdir_done()
+endif
-pybind11_proj = subproject('pybind11')
-pybind11_dep = pybind11_proj.get_variable('pybind11_dep')
+pycamera_enabled = true
pycamera_sources = files([
+ 'py_camera_manager.cpp',
+ 'py_color_space.cpp',
'py_enums.cpp',
'py_geometry.cpp',
+ 'py_helpers.cpp',
'py_main.cpp',
+ 'py_transform.cpp',
])
# Generate controls
-gen_py_controls_input_files = files([
- '../../libcamera/control_ids.yaml',
- 'py_controls_generated.cpp.in',
-])
+gen_py_controls_input_files = []
+gen_py_controls_template = files('py_controls_generated.cpp.in')
gen_py_controls = files('gen-py-controls.py')
+foreach file : controls_files
+ gen_py_controls_input_files += files('../../libcamera/' + file)
+endforeach
+
pycamera_sources += custom_target('py_gen_controls',
input : gen_py_controls_input_files,
output : ['py_controls_generated.cpp'],
- command : [gen_py_controls, '--mode', 'controls', '-o', '@OUTPUT@', '@INPUT@'])
+ command : [gen_py_controls, '--mode', 'controls', '-o', '@OUTPUT@',
+ '-t', gen_py_controls_template, '@INPUT@'])
# Generate properties
-gen_py_property_enums_input_files = files([
- '../../libcamera/property_ids.yaml',
- 'py_properties_generated.cpp.in',
-])
+gen_py_property_enums_input_files = []
+gen_py_properties_template = files('py_properties_generated.cpp.in')
+
+foreach file : properties_files
+ gen_py_property_enums_input_files += files('../../libcamera/' + file)
+endforeach
pycamera_sources += custom_target('py_gen_properties',
input : gen_py_property_enums_input_files,
output : ['py_properties_generated.cpp'],
- command : [gen_py_controls, '--mode', 'properties', '-o', '@OUTPUT@', '@INPUT@'])
+ command : [gen_py_controls, '--mode', 'properties', '-o', '@OUTPUT@',
+ '-t', gen_py_properties_template, '@INPUT@'])
# Generate formats
@@ -59,7 +73,7 @@ pycamera_sources += custom_target('py_gen_formats',
command : [gen_py_formats, '-o', '@OUTPUT@', '@INPUT@'])
pycamera_deps = [
- libcamera_public,
+ libcamera_private,
py3_dep,
pybind11_dep,
]
@@ -68,7 +82,6 @@ pycamera_args = [
'-fvisibility=hidden',
'-Wno-shadow',
'-DPYBIND11_USE_SMART_HOLDER_AS_DEFAULT',
- '-DLIBCAMERA_BASE_PRIVATE',
]
destdir = get_option('libdir') / ('python' + py3_dep.version()) / 'site-packages' / 'libcamera'
@@ -77,6 +90,7 @@ pycamera = shared_module('_libcamera',
pycamera_sources,
install : true,
install_dir : destdir,
+ install_tag : 'python-runtime',
name_prefix : '',
dependencies : pycamera_deps,
cpp_args : pycamera_args)
@@ -86,13 +100,15 @@ pycamera = shared_module('_libcamera',
run_command('ln', '-fsrT', files('__init__.py'),
meson.current_build_dir() / '__init__.py',
- check: true)
+ check : true)
run_command('ln', '-fsrT', meson.current_source_dir() / 'utils',
meson.current_build_dir() / 'utils',
- check: true)
+ check : true)
-install_data(['__init__.py'], install_dir : destdir)
+install_data(['__init__.py'],
+ install_dir : destdir,
+ install_tag : 'python-runtime')
# \todo Generate stubs when building. See https://peps.python.org/pep-0484/#stub-files
# Note: Depends on pybind11-stubgen. To generate pylibcamera stubs:
diff --git a/src/py/libcamera/py_camera_manager.cpp b/src/py/libcamera/py_camera_manager.cpp
new file mode 100644
index 00000000..9ccb7aad
--- /dev/null
+++ b/src/py/libcamera/py_camera_manager.cpp
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+ */
+
+#include "py_camera_manager.h"
+
+#include <errno.h>
+#include <memory>
+#include <sys/eventfd.h>
+#include <system_error>
+#include <unistd.h>
+#include <vector>
+
+#include "py_main.h"
+
+namespace py = pybind11;
+
+using namespace libcamera;
+
+PyCameraManager::PyCameraManager()
+{
+ LOG(Python, Debug) << "PyCameraManager()";
+
+ cameraManager_ = std::make_unique<CameraManager>();
+
+ int fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
+ if (fd == -1)
+ throw std::system_error(errno, std::generic_category(),
+ "Failed to create eventfd");
+
+ eventFd_ = UniqueFD(fd);
+
+ int ret = cameraManager_->start();
+ if (ret)
+ throw std::system_error(-ret, std::generic_category(),
+ "Failed to start CameraManager");
+}
+
+PyCameraManager::~PyCameraManager()
+{
+ LOG(Python, Debug) << "~PyCameraManager()";
+}
+
+py::list PyCameraManager::cameras()
+{
+ /*
+ * Create a list of Cameras, where each camera has a keep-alive to
+ * CameraManager.
+ */
+ py::list l;
+
+ for (auto &camera : cameraManager_->cameras()) {
+ py::object py_cm = py::cast(this);
+ py::object py_cam = py::cast(camera);
+ py::detail::keep_alive_impl(py_cam, py_cm);
+ l.append(py_cam);
+ }
+
+ return l;
+}
+
+std::vector<py::object> PyCameraManager::getReadyRequests()
+{
+ int ret = readFd();
+
+ if (ret == -EAGAIN)
+ return std::vector<py::object>();
+
+ if (ret != 0)
+ throw std::system_error(-ret, std::generic_category());
+
+ std::vector<py::object> py_reqs;
+
+ for (Request *request : getCompletedRequests()) {
+ py::object o = py::cast(request);
+ /* Decrease the ref increased in Camera.queue_request() */
+ o.dec_ref();
+ py_reqs.push_back(o);
+ }
+
+ return py_reqs;
+}
+
+/* Note: Called from another thread */
+void PyCameraManager::handleRequestCompleted(Request *req)
+{
+ pushRequest(req);
+ writeFd();
+}
+
+void PyCameraManager::writeFd()
+{
+ uint64_t v = 1;
+
+ size_t s = write(eventFd_.get(), &v, 8);
+ /*
+ * We should never fail, and have no simple means to manage the error,
+ * so let's log a fatal error.
+ */
+ if (s != 8)
+ LOG(Python, Fatal) << "Unable to write to eventfd";
+}
+
+int PyCameraManager::readFd()
+{
+ uint8_t buf[8];
+
+ ssize_t ret = read(eventFd_.get(), buf, 8);
+
+ if (ret == 8)
+ return 0;
+ else if (ret < 0)
+ return -errno;
+ else
+ return -EIO;
+}
+
+void PyCameraManager::pushRequest(Request *req)
+{
+ MutexLocker guard(completedRequestsMutex_);
+ completedRequests_.push_back(req);
+}
+
+std::vector<Request *> PyCameraManager::getCompletedRequests()
+{
+ std::vector<Request *> v;
+ MutexLocker guard(completedRequestsMutex_);
+ swap(v, completedRequests_);
+ return v;
+}
diff --git a/src/py/libcamera/py_camera_manager.h b/src/py/libcamera/py_camera_manager.h
new file mode 100644
index 00000000..3574db23
--- /dev/null
+++ b/src/py/libcamera/py_camera_manager.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+ */
+
+#pragma once
+
+#include <libcamera/base/mutex.h>
+
+#include <libcamera/libcamera.h>
+
+#include <pybind11/pybind11.h>
+
+using namespace libcamera;
+
+class PyCameraManager
+{
+public:
+ PyCameraManager();
+ ~PyCameraManager();
+
+ pybind11::list cameras();
+ std::shared_ptr<Camera> get(const std::string &name) { return cameraManager_->get(name); }
+
+ static const std::string &version() { return CameraManager::version(); }
+
+ int eventFd() const { return eventFd_.get(); }
+
+ std::vector<pybind11::object> getReadyRequests();
+
+ void handleRequestCompleted(Request *req);
+
+private:
+ std::unique_ptr<CameraManager> cameraManager_;
+
+ UniqueFD eventFd_;
+ libcamera::Mutex completedRequestsMutex_;
+ std::vector<Request *> completedRequests_
+ LIBCAMERA_TSA_GUARDED_BY(completedRequestsMutex_);
+
+ void writeFd();
+ int readFd();
+ void pushRequest(Request *req);
+ std::vector<Request *> getCompletedRequests();
+};
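The class above pairs an eventfd with a completed-request queue: handleRequestCompleted() runs in the camera thread, queues the Request and writes to the fd, while a consumer waits on eventFd() before draining getReadyRequests() (the Python side does this with a selector, as shown earlier). A minimal C++ sketch of that handshake, assuming POSIX poll() and that the GIL is held when the returned handles are used:

/* Sketch only, not part of the patch. */
#include <poll.h>

#include <pybind11/pybind11.h>

#include "py_camera_manager.h"

static void drainCompletedRequests(PyCameraManager &cm)
{
	struct pollfd pfd = { cm.eventFd(), POLLIN, 0 };

	/* Block until handleRequestCompleted() has signalled the eventfd. */
	if (poll(&pfd, 1, -1) <= 0)
		return;

	/* getReadyRequests() returns pybind11 handles, so hold the GIL here. */
	for (pybind11::object &req : cm.getReadyRequests())
		pybind11::print(req);
}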
diff --git a/src/py/libcamera/py_color_space.cpp b/src/py/libcamera/py_color_space.cpp
new file mode 100644
index 00000000..5201121a
--- /dev/null
+++ b/src/py/libcamera/py_color_space.cpp
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+ *
+ * Python bindings - Color Space classes
+ */
+
+#include <libcamera/color_space.h>
+#include <libcamera/libcamera.h>
+
+#include <pybind11/operators.h>
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+
+namespace py = pybind11;
+
+using namespace libcamera;
+
+void init_py_color_space(py::module &m)
+{
+ auto pyColorSpace = py::class_<ColorSpace>(m, "ColorSpace");
+ auto pyColorSpacePrimaries = py::enum_<ColorSpace::Primaries>(pyColorSpace, "Primaries");
+ auto pyColorSpaceTransferFunction = py::enum_<ColorSpace::TransferFunction>(pyColorSpace, "TransferFunction");
+ auto pyColorSpaceYcbcrEncoding = py::enum_<ColorSpace::YcbcrEncoding>(pyColorSpace, "YcbcrEncoding");
+ auto pyColorSpaceRange = py::enum_<ColorSpace::Range>(pyColorSpace, "Range");
+
+ pyColorSpace
+ .def(py::init([](ColorSpace::Primaries primaries,
+ ColorSpace::TransferFunction transferFunction,
+ ColorSpace::YcbcrEncoding ycbcrEncoding,
+ ColorSpace::Range range) {
+ return ColorSpace(primaries, transferFunction, ycbcrEncoding, range);
+ }), py::arg("primaries"), py::arg("transferFunction"),
+ py::arg("ycbcrEncoding"), py::arg("range"))
+ .def(py::init([](ColorSpace &other) { return other; }))
+ .def("__str__", [](ColorSpace &self) {
+ return "<libcamera.ColorSpace '" + self.toString() + "'>";
+ })
+ .def_readwrite("primaries", &ColorSpace::primaries)
+ .def_readwrite("transferFunction", &ColorSpace::transferFunction)
+ .def_readwrite("ycbcrEncoding", &ColorSpace::ycbcrEncoding)
+ .def_readwrite("range", &ColorSpace::range)
+ .def_static("Raw", []() { return ColorSpace::Raw; })
+ .def_static("Srgb", []() { return ColorSpace::Srgb; })
+ .def_static("Sycc", []() { return ColorSpace::Sycc; })
+ .def_static("Smpte170m", []() { return ColorSpace::Smpte170m; })
+ .def_static("Rec709", []() { return ColorSpace::Rec709; })
+ .def_static("Rec2020", []() { return ColorSpace::Rec2020; });
+
+ pyColorSpacePrimaries
+ .value("Raw", ColorSpace::Primaries::Raw)
+ .value("Smpte170m", ColorSpace::Primaries::Smpte170m)
+ .value("Rec709", ColorSpace::Primaries::Rec709)
+ .value("Rec2020", ColorSpace::Primaries::Rec2020);
+
+ pyColorSpaceTransferFunction
+ .value("Linear", ColorSpace::TransferFunction::Linear)
+ .value("Srgb", ColorSpace::TransferFunction::Srgb)
+ .value("Rec709", ColorSpace::TransferFunction::Rec709);
+
+ pyColorSpaceYcbcrEncoding
+ .value("Null", ColorSpace::YcbcrEncoding::None)
+ .value("Rec601", ColorSpace::YcbcrEncoding::Rec601)
+ .value("Rec709", ColorSpace::YcbcrEncoding::Rec709)
+ .value("Rec2020", ColorSpace::YcbcrEncoding::Rec2020);
+
+ pyColorSpaceRange
+ .value("Full", ColorSpace::Range::Full)
+ .value("Limited", ColorSpace::Range::Limited);
+}
diff --git a/src/py/libcamera/py_controls_generated.cpp.in b/src/py/libcamera/py_controls_generated.cpp.in
index cb8442ba..8d282ce5 100644
--- a/src/py/libcamera/py_controls_generated.cpp.in
+++ b/src/py/libcamera/py_controls_generated.cpp.in
@@ -9,7 +9,7 @@
#include <libcamera/control_ids.h>
-#include <pybind11/smart_holder.h>
+#include <pybind11/pybind11.h>
namespace py = pybind11;
@@ -17,14 +17,12 @@ class PyControls
{
};
-class PyDraftControls
-{
-};
+${vendors_class_def}
void init_py_controls_generated(py::module& m)
{
auto controls = py::class_<PyControls>(m, "controls");
- auto draft = py::class_<PyDraftControls>(controls, "draft");
+${vendors_defs}
${controls}
}
diff --git a/src/py/libcamera/py_enums.cpp b/src/py/libcamera/py_enums.cpp
index 96d4beef..e25689c6 100644
--- a/src/py/libcamera/py_enums.cpp
+++ b/src/py/libcamera/py_enums.cpp
@@ -7,7 +7,7 @@
#include <libcamera/libcamera.h>
-#include <pybind11/smart_holder.h>
+#include <pybind11/pybind11.h>
namespace py = pybind11;
@@ -31,4 +31,14 @@ void init_py_enums(py::module &m)
.value("String", ControlType::ControlTypeString)
.value("Rectangle", ControlType::ControlTypeRectangle)
.value("Size", ControlType::ControlTypeSize);
+
+ py::enum_<Orientation>(m, "Orientation")
+ .value("Rotate0", Orientation::Rotate0)
+ .value("Rotate0Mirror", Orientation::Rotate0Mirror)
+ .value("Rotate180", Orientation::Rotate180)
+ .value("Rotate180Mirror", Orientation::Rotate180Mirror)
+ .value("Rotate90Mirror", Orientation::Rotate90Mirror)
+ .value("Rotate270", Orientation::Rotate270)
+ .value("Rotate270Mirror", Orientation::Rotate270Mirror)
+ .value("Rotate90", Orientation::Rotate90);
}
diff --git a/src/py/libcamera/py_formats_generated.cpp.in b/src/py/libcamera/py_formats_generated.cpp.in
index b88807f3..a3f7f94d 100644
--- a/src/py/libcamera/py_formats_generated.cpp.in
+++ b/src/py/libcamera/py_formats_generated.cpp.in
@@ -9,7 +9,7 @@
#include <libcamera/formats.h>
-#include <pybind11/smart_holder.h>
+#include <pybind11/pybind11.h>
namespace py = pybind11;
diff --git a/src/py/libcamera/py_geometry.cpp b/src/py/libcamera/py_geometry.cpp
index 84b0cb08..5c2aeac4 100644
--- a/src/py/libcamera/py_geometry.cpp
+++ b/src/py/libcamera/py_geometry.cpp
@@ -11,7 +11,7 @@
#include <libcamera/libcamera.h>
#include <pybind11/operators.h>
-#include <pybind11/smart_holder.h>
+#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
namespace py = pybind11;
diff --git a/src/py/libcamera/py_helpers.cpp b/src/py/libcamera/py_helpers.cpp
new file mode 100644
index 00000000..79891ab6
--- /dev/null
+++ b/src/py/libcamera/py_helpers.cpp
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+ */
+
+#include "py_helpers.h"
+
+#include <libcamera/libcamera.h>
+
+#include <pybind11/functional.h>
+#include <pybind11/stl.h>
+#include <pybind11/stl_bind.h>
+
+namespace py = pybind11;
+
+using namespace libcamera;
+
+template<typename T>
+static py::object valueOrTuple(const ControlValue &cv)
+{
+ if (cv.isArray()) {
+ const T *v = reinterpret_cast<const T *>(cv.data().data());
+ auto t = py::tuple(cv.numElements());
+
+ for (size_t i = 0; i < cv.numElements(); ++i)
+ t[i] = v[i];
+
+ return std::move(t);
+ }
+
+ return py::cast(cv.get<T>());
+}
+
+py::object controlValueToPy(const ControlValue &cv)
+{
+ switch (cv.type()) {
+ case ControlTypeBool:
+ return valueOrTuple<bool>(cv);
+ case ControlTypeByte:
+ return valueOrTuple<uint8_t>(cv);
+ case ControlTypeInteger32:
+ return valueOrTuple<int32_t>(cv);
+ case ControlTypeInteger64:
+ return valueOrTuple<int64_t>(cv);
+ case ControlTypeFloat:
+ return valueOrTuple<float>(cv);
+ case ControlTypeString:
+ return py::cast(cv.get<std::string>());
+ case ControlTypeRectangle:
+ return valueOrTuple<Rectangle>(cv);
+ case ControlTypeSize: {
+ const Size *v = reinterpret_cast<const Size *>(cv.data().data());
+ return py::cast(v);
+ }
+ case ControlTypeNone:
+ return py::none();
+ default:
+ throw std::runtime_error("Unsupported ControlValue type");
+ }
+}
+
+template<typename T>
+static ControlValue controlValueMaybeArray(const py::object &ob)
+{
+ if (py::isinstance<py::list>(ob) || py::isinstance<py::tuple>(ob)) {
+ std::vector<T> vec = ob.cast<std::vector<T>>();
+ return ControlValue(Span<const T>(vec));
+ }
+
+ return ControlValue(ob.cast<T>());
+}
+
+ControlValue pyToControlValue(const py::object &ob, ControlType type)
+{
+ switch (type) {
+ case ControlTypeBool:
+ return ControlValue(ob.cast<bool>());
+ case ControlTypeByte:
+ return controlValueMaybeArray<uint8_t>(ob);
+ case ControlTypeInteger32:
+ return controlValueMaybeArray<int32_t>(ob);
+ case ControlTypeInteger64:
+ return controlValueMaybeArray<int64_t>(ob);
+ case ControlTypeFloat:
+ return controlValueMaybeArray<float>(ob);
+ case ControlTypeString:
+ return ControlValue(ob.cast<std::string>());
+ case ControlTypeRectangle:
+ return controlValueMaybeArray<Rectangle>(ob);
+ case ControlTypeSize:
+ return ControlValue(ob.cast<Size>());
+ case ControlTypeNone:
+ return ControlValue();
+ default:
+ throw std::runtime_error("Control type not implemented");
+ }
+}
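These two helpers are the single conversion point between ControlValue and Python: array-valued controls surface as tuples, and Python lists or tuples are packed back into Spans. A sketch of how a binding could use them, assuming a hypothetical py::class_<ControlList> (the real code in py_main.cpp performs the same calls inline on Camera and Request):

/* Sketch only, not part of the patch. */
#include <libcamera/libcamera.h>

#include <pybind11/pybind11.h>

#include "py_helpers.h"

namespace py = pybind11;
using namespace libcamera;

static void bindControlAccess(py::class_<ControlList> &pyControlList)
{
	pyControlList
		.def("get", [](ControlList &self, const ControlId &id) {
			return controlValueToPy(self.get(id.id()));
		})
		.def("set", [](ControlList &self, const ControlId &id, py::object value) {
			self.set(id.id(), pyToControlValue(value, id.type()));
		});
}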
diff --git a/src/py/libcamera/py_helpers.h b/src/py/libcamera/py_helpers.h
new file mode 100644
index 00000000..983969df
--- /dev/null
+++ b/src/py/libcamera/py_helpers.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+ */
+
+#pragma once
+
+#include <libcamera/libcamera.h>
+
+#include <pybind11/pybind11.h>
+
+pybind11::object controlValueToPy(const libcamera::ControlValue &cv);
+libcamera::ControlValue pyToControlValue(const pybind11::object &ob, libcamera::ControlType type);
diff --git a/src/py/libcamera/py_main.cpp b/src/py/libcamera/py_main.cpp
index 505cc3dc..bce08218 100644
--- a/src/py/libcamera/py_main.cpp
+++ b/src/py/libcamera/py_main.cpp
@@ -5,132 +5,93 @@
* Python bindings
*/
-#include <mutex>
+#include "py_main.h"
+
+#include <memory>
#include <stdexcept>
-#include <sys/eventfd.h>
-#include <unistd.h>
+#include <string>
+#include <vector>
#include <libcamera/base/log.h>
#include <libcamera/libcamera.h>
#include <pybind11/functional.h>
-#include <pybind11/smart_holder.h>
+#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <pybind11/stl_bind.h>
+#include "py_camera_manager.h"
+#include "py_helpers.h"
+
namespace py = pybind11;
using namespace libcamera;
-template<typename T>
-static py::object valueOrTuple(const ControlValue &cv)
-{
- if (cv.isArray()) {
- const T *v = reinterpret_cast<const T *>(cv.data().data());
- auto t = py::tuple(cv.numElements());
+namespace libcamera {
- for (size_t i = 0; i < cv.numElements(); ++i)
- t[i] = v[i];
+LOG_DEFINE_CATEGORY(Python)
- return std::move(t);
- }
-
- return py::cast(cv.get<T>());
}
-static py::object controlValueToPy(const ControlValue &cv)
+/*
+ * This is a holder class used only for the Camera class, for the sole purpose
+ * of avoiding the compilation issue with Camera's private destructor.
+ *
+ * pybind11 requires a public destructor for classes held with shared_ptrs, even
+ * in cases where the public destructor is not strictly needed. The current
+ * understanding is that there are the following options to solve the problem:
+ *
+ * - Use pybind11 'smart_holder' branch. The downside is that 'smart_holder'
+ * is not the mainline branch, and not available in distributions.
+ * - https://github.com/pybind/pybind11/pull/2067
+ * - Make the Camera destructor public
+ * - Something like the PyCameraSmartPtr here, which adds a layer, hiding the
+ * issue.
+ */
+template<typename T>
+class PyCameraSmartPtr
{
- switch (cv.type()) {
- case ControlTypeBool:
- return valueOrTuple<bool>(cv);
- case ControlTypeByte:
- return valueOrTuple<uint8_t>(cv);
- case ControlTypeInteger32:
- return valueOrTuple<int32_t>(cv);
- case ControlTypeInteger64:
- return valueOrTuple<int64_t>(cv);
- case ControlTypeFloat:
- return valueOrTuple<float>(cv);
- case ControlTypeString:
- return py::cast(cv.get<std::string>());
- case ControlTypeRectangle: {
- const Rectangle *v = reinterpret_cast<const Rectangle *>(cv.data().data());
- return py::cast(v);
- }
- case ControlTypeSize: {
- const Size *v = reinterpret_cast<const Size *>(cv.data().data());
- return py::cast(v);
+public:
+ using element_type = T;
+
+ PyCameraSmartPtr()
+ {
}
- case ControlTypeNone:
- default:
- throw std::runtime_error("Unsupported ControlValue type");
+
+ explicit PyCameraSmartPtr(T *)
+ {
+ throw std::runtime_error("invalid SmartPtr constructor call");
}
-}
-template<typename T>
-static ControlValue controlValueMaybeArray(const py::object &ob)
-{
- if (py::isinstance<py::list>(ob) || py::isinstance<py::tuple>(ob)) {
- std::vector<T> vec = ob.cast<std::vector<T>>();
- return ControlValue(Span<const T>(vec));
+ explicit PyCameraSmartPtr(std::shared_ptr<T> p)
+ : ptr_(p)
+ {
}
- return ControlValue(ob.cast<T>());
-}
+ T *get() const { return ptr_.get(); }
-static ControlValue pyToControlValue(const py::object &ob, ControlType type)
-{
- switch (type) {
- case ControlTypeBool:
- return ControlValue(ob.cast<bool>());
- case ControlTypeByte:
- return controlValueMaybeArray<uint8_t>(ob);
- case ControlTypeInteger32:
- return controlValueMaybeArray<int32_t>(ob);
- case ControlTypeInteger64:
- return controlValueMaybeArray<int64_t>(ob);
- case ControlTypeFloat:
- return controlValueMaybeArray<float>(ob);
- case ControlTypeString:
- return ControlValue(ob.cast<std::string>());
- case ControlTypeRectangle:
- return ControlValue(ob.cast<Rectangle>());
- case ControlTypeSize:
- return ControlValue(ob.cast<Size>());
- case ControlTypeNone:
- default:
- throw std::runtime_error("Control type not implemented");
- }
-}
+ operator std::shared_ptr<T>() const { return ptr_; }
-static std::weak_ptr<CameraManager> gCameraManager;
-static int gEventfd;
-static std::mutex gReqlistMutex;
-static std::vector<Request *> gReqList;
+private:
+ std::shared_ptr<T> ptr_;
+};
-static void handleRequestCompleted(Request *req)
-{
- {
- std::lock_guard guard(gReqlistMutex);
- gReqList.push_back(req);
- }
+PYBIND11_DECLARE_HOLDER_TYPE(T, PyCameraSmartPtr<T>)
- uint64_t v = 1;
- size_t s = write(gEventfd, &v, 8);
- /*
- * We should never fail, and have no simple means to manage the error,
- * so let's use LOG(Fatal).
- */
- if (s != 8)
- LOG(Fatal) << "Unable to write to eventfd";
-}
+/*
+ * Note: global C++ destructors can be run on this before the py module is
+ * destructed.
+ */
+static std::weak_ptr<PyCameraManager> gCameraManager;
-void init_py_enums(py::module &m);
+void init_py_color_space(py::module &m);
void init_py_controls_generated(py::module &m);
+void init_py_enums(py::module &m);
void init_py_formats_generated(py::module &m);
void init_py_geometry(py::module &m);
void init_py_properties_generated(py::module &m);
+void init_py_transform(py::module &m);
PYBIND11_MODULE(_libcamera, m)
{
@@ -138,6 +99,8 @@ PYBIND11_MODULE(_libcamera, m)
init_py_controls_generated(m);
init_py_geometry(m);
init_py_properties_generated(m);
+ init_py_color_space(m);
+ init_py_transform(m);
/* Forward declarations */
@@ -147,8 +110,9 @@ PYBIND11_MODULE(_libcamera, m)
* https://pybind11.readthedocs.io/en/latest/advanced/misc.html#avoiding-c-types-in-docstrings
*/
- auto pyCameraManager = py::class_<CameraManager>(m, "CameraManager");
- auto pyCamera = py::class_<Camera>(m, "Camera");
+ auto pyCameraManager = py::class_<PyCameraManager, std::shared_ptr<PyCameraManager>>(m, "CameraManager");
+ auto pyCamera = py::class_<Camera, PyCameraSmartPtr<Camera>>(m, "Camera");
+ auto pySensorConfiguration = py::class_<SensorConfiguration>(m, "SensorConfiguration");
auto pyCameraConfiguration = py::class_<CameraConfiguration>(m, "CameraConfiguration");
auto pyCameraConfigurationStatus = py::enum_<CameraConfiguration::Status>(pyCameraConfiguration, "Status");
auto pyStreamConfiguration = py::class_<StreamConfiguration>(m, "StreamConfiguration");
@@ -165,12 +129,6 @@ PYBIND11_MODULE(_libcamera, m)
auto pyFrameMetadata = py::class_<FrameMetadata>(m, "FrameMetadata");
auto pyFrameMetadataStatus = py::enum_<FrameMetadata::Status>(pyFrameMetadata, "Status");
auto pyFrameMetadataPlane = py::class_<FrameMetadata::Plane>(pyFrameMetadata, "Plane");
- auto pyTransform = py::class_<Transform>(m, "Transform");
- auto pyColorSpace = py::class_<ColorSpace>(m, "ColorSpace");
- auto pyColorSpacePrimaries = py::enum_<ColorSpace::Primaries>(pyColorSpace, "Primaries");
- auto pyColorSpaceTransferFunction = py::enum_<ColorSpace::TransferFunction>(pyColorSpace, "TransferFunction");
- auto pyColorSpaceYcbcrEncoding = py::enum_<ColorSpace::YcbcrEncoding>(pyColorSpace, "YcbcrEncoding");
- auto pyColorSpaceRange = py::enum_<ColorSpace::Range>(pyColorSpace, "Range");
auto pyPixelFormat = py::class_<PixelFormat>(m, "PixelFormat");
init_py_formats_generated(m);
@@ -181,113 +139,69 @@ PYBIND11_MODULE(_libcamera, m)
/* Classes */
pyCameraManager
.def_static("singleton", []() {
- std::shared_ptr<CameraManager> cm = gCameraManager.lock();
- if (cm)
- return cm;
-
- int fd = eventfd(0, 0);
- if (fd == -1)
- throw std::system_error(errno, std::generic_category(),
- "Failed to create eventfd");
-
- cm = std::shared_ptr<CameraManager>(new CameraManager, [](auto p) {
- close(gEventfd);
- gEventfd = -1;
- delete p;
- });
-
- gEventfd = fd;
- gCameraManager = cm;
-
- int ret = cm->start();
- if (ret)
- throw std::system_error(-ret, std::generic_category(),
- "Failed to start CameraManager");
-
- return cm;
- })
-
- .def_property_readonly("version", &CameraManager::version)
-
- .def_property_readonly("event_fd", [](CameraManager &) {
- return gEventfd;
- })
-
- .def("get_ready_requests", [](CameraManager &) {
- uint8_t buf[8];
-
- if (read(gEventfd, buf, 8) != 8)
- throw std::system_error(errno, std::generic_category());
-
- std::vector<Request *> v;
-
- {
- std::lock_guard guard(gReqlistMutex);
- swap(v, gReqList);
- }
-
- std::vector<py::object> ret;
+ std::shared_ptr<PyCameraManager> cm = gCameraManager.lock();
- for (Request *req : v) {
- py::object o = py::cast(req);
- /* Decrease the ref increased in Camera.queue_request() */
- o.dec_ref();
- ret.push_back(o);
+ if (!cm) {
+ cm = std::make_shared<PyCameraManager>();
+ gCameraManager = cm;
}
- return ret;
+ return cm;
})
- .def("get", py::overload_cast<const std::string &>(&CameraManager::get), py::keep_alive<0, 1>())
+ .def_property_readonly_static("version", [](py::object /* self */) { return PyCameraManager::version(); })
+ .def("get", &PyCameraManager::get, py::keep_alive<0, 1>())
+ .def_property_readonly("cameras", &PyCameraManager::cameras)
- /* Create a list of Cameras, where each camera has a keep-alive to CameraManager */
- .def_property_readonly("cameras", [](CameraManager &self) {
- py::list l;
-
- for (auto &c : self.cameras()) {
- py::object py_cm = py::cast(self);
- py::object py_cam = py::cast(c);
- py::detail::keep_alive_impl(py_cam, py_cm);
- l.append(py_cam);
- }
-
- return l;
- });
+ .def_property_readonly("event_fd", &PyCameraManager::eventFd)
+ .def("get_ready_requests", &PyCameraManager::getReadyRequests);
pyCamera
.def_property_readonly("id", &Camera::id)
- .def("acquire", &Camera::acquire)
- .def("release", &Camera::release)
+ .def("acquire", [](Camera &self) {
+ int ret = self.acquire();
+ if (ret)
+ throw std::system_error(-ret, std::generic_category(),
+ "Failed to acquire camera");
+ })
+ .def("release", [](Camera &self) {
+ int ret = self.release();
+ if (ret)
+ throw std::system_error(-ret, std::generic_category(),
+ "Failed to release camera");
+ })
.def("start", [](Camera &self,
const std::unordered_map<const ControlId *, py::object> &controls) {
/* \todo What happens if someone calls start() multiple times? */
- self.requestCompleted.connect(handleRequestCompleted);
+ auto cm = gCameraManager.lock();
+ ASSERT(cm);
+
+ self.requestCompleted.connect(cm.get(), &PyCameraManager::handleRequestCompleted);
ControlList controlList(self.controls());
- for (const auto& [id, obj]: controls) {
+ for (const auto &[id, obj] : controls) {
auto val = pyToControlValue(obj, id->type());
controlList.set(id->id(), val);
}
int ret = self.start(&controlList);
if (ret) {
- self.requestCompleted.disconnect(handleRequestCompleted);
- return ret;
+ self.requestCompleted.disconnect();
+ throw std::system_error(-ret, std::generic_category(),
+ "Failed to start camera");
}
-
- return 0;
}, py::arg("controls") = std::unordered_map<const ControlId *, py::object>())
.def("stop", [](Camera &self) {
int ret = self.stop();
- if (ret)
- return ret;
- self.requestCompleted.disconnect(handleRequestCompleted);
+ self.requestCompleted.disconnect();
- return 0;
+ if (ret)
+ throw std::system_error(-ret, std::generic_category(),
+ "Failed to stop camera");
})
.def("__str__", [](Camera &self) {
@@ -295,10 +209,24 @@ PYBIND11_MODULE(_libcamera, m)
})
/* Keep the camera alive, as StreamConfiguration contains a Stream* */
- .def("generate_configuration", &Camera::generateConfiguration, py::keep_alive<0, 1>())
- .def("configure", &Camera::configure)
+ .def("generate_configuration", [](Camera &self, const std::vector<StreamRole> &roles) {
+ return self.generateConfiguration(roles);
+ }, py::keep_alive<0, 1>())
+
+ .def("configure", [](Camera &self, CameraConfiguration *config) {
+ int ret = self.configure(config);
+ if (ret)
+ throw std::system_error(-ret, std::generic_category(),
+ "Failed to configure camera");
+ })
- .def("create_request", &Camera::createRequest, py::arg("cookie") = 0)
+ .def("create_request", [](Camera &self, uint64_t cookie) {
+ std::unique_ptr<Request> req = self.createRequest(cookie);
+ if (!req)
+ throw std::system_error(ENOMEM, std::generic_category(),
+ "Failed to create request");
+ return req;
+ }, py::arg("cookie") = 0)
.def("queue_request", [](Camera &self, Request *req) {
py::object py_req = py::cast(req);
@@ -311,10 +239,11 @@ PYBIND11_MODULE(_libcamera, m)
py_req.inc_ref();
int ret = self.queueRequest(req);
- if (ret)
+ if (ret) {
py_req.dec_ref();
-
- return ret;
+ throw std::system_error(-ret, std::generic_category(),
+ "Failed to queue request");
+ }
})
.def_property_readonly("streams", [](Camera &self) {
@@ -353,6 +282,40 @@ PYBIND11_MODULE(_libcamera, m)
return ret;
});
+ pySensorConfiguration
+ .def(py::init<>())
+ .def_readwrite("bit_depth", &SensorConfiguration::bitDepth)
+ .def_readwrite("analog_crop", &SensorConfiguration::analogCrop)
+ .def_property(
+ "binning",
+ [](SensorConfiguration &self) {
+ return py::make_tuple(self.binning.binX, self.binning.binY);
+ },
+ [](SensorConfiguration &self, py::object value) {
+ auto vec = value.cast<std::vector<unsigned int>>();
+ if (vec.size() != 2)
+ throw std::runtime_error("binning requires iterable of 2 values");
+ self.binning.binX = vec[0];
+ self.binning.binY = vec[1];
+ })
+ .def_property(
+ "skipping",
+ [](SensorConfiguration &self) {
+ return py::make_tuple(self.skipping.xOddInc, self.skipping.xEvenInc,
+ self.skipping.yOddInc, self.skipping.yEvenInc);
+ },
+ [](SensorConfiguration &self, py::object value) {
+ auto vec = value.cast<std::vector<unsigned int>>();
+ if (vec.size() != 4)
+ throw std::runtime_error("skipping requires iterable of 4 values");
+ self.skipping.xOddInc = vec[0];
+ self.skipping.xEvenInc = vec[1];
+ self.skipping.yOddInc = vec[2];
+ self.skipping.yEvenInc = vec[3];
+ })
+ .def_readwrite("output_size", &SensorConfiguration::outputSize)
+ .def("is_valid", &SensorConfiguration::isValid);
+
pyCameraConfiguration
.def("__iter__", [](CameraConfiguration &self) {
return py::make_iterator<py::return_value_policy::reference_internal>(self);
@@ -365,7 +328,8 @@ PYBIND11_MODULE(_libcamera, m)
py::return_value_policy::reference_internal)
.def_property_readonly("size", &CameraConfiguration::size)
.def_property_readonly("empty", &CameraConfiguration::empty)
- .def_readwrite("transform", &CameraConfiguration::transform);
+ .def_readwrite("sensor_config", &CameraConfiguration::sensorConfig)
+ .def_readwrite("orientation", &CameraConfiguration::orientation);
pyCameraConfigurationStatus
.value("Valid", CameraConfiguration::Valid)
@@ -391,8 +355,14 @@ PYBIND11_MODULE(_libcamera, m)
.def("range", &StreamFormats::range);
pyFrameBufferAllocator
- .def(py::init<std::shared_ptr<Camera>>(), py::keep_alive<1, 2>())
- .def("allocate", &FrameBufferAllocator::allocate)
+ .def(py::init<PyCameraSmartPtr<Camera>>(), py::keep_alive<1, 2>())
+ .def("allocate", [](FrameBufferAllocator &self, Stream *stream) {
+ int ret = self.allocate(stream);
+ if (ret < 0)
+ throw std::system_error(-ret, std::generic_category(),
+ "Failed to allocate buffers");
+ return ret;
+ })
.def_property_readonly("allocated", &FrameBufferAllocator::allocated)
/* Create a list of FrameBuffers, where each FrameBuffer has a keep-alive to FrameBufferAllocator */
.def("buffers", [](FrameBufferAllocator &self, Stream *stream) {
@@ -469,11 +439,15 @@ PYBIND11_MODULE(_libcamera, m)
pyRequest
/* \todo Fence is not supported, so we cannot expose addBuffer() directly */
.def("add_buffer", [](Request &self, const Stream *stream, FrameBuffer *buffer) {
- return self.addBuffer(stream, buffer);
+ int ret = self.addBuffer(stream, buffer);
+ if (ret)
+ throw std::system_error(-ret, std::generic_category(),
+ "Failed to add buffer");
}, py::keep_alive<1, 3>()) /* Request keeps Framebuffer alive */
.def_property_readonly("status", &Request::status)
.def_property_readonly("buffers", &Request::buffers)
.def_property_readonly("cookie", &Request::cookie)
+ .def_property_readonly("sequence", &Request::sequence)
.def_property_readonly("has_pending_buffers", &Request::hasPendingBuffers)
.def("set_control", [](Request &self, const ControlId &id, py::object value) {
self.controls().set(id.id(), pyToControlValue(value, id.type()));
@@ -526,109 +500,6 @@ PYBIND11_MODULE(_libcamera, m)
pyFrameMetadataPlane
.def_readwrite("bytes_used", &FrameMetadata::Plane::bytesused);
- pyTransform
- .def(py::init([](int rotation, bool hflip, bool vflip, bool transpose) {
- bool ok;
-
- Transform t = transformFromRotation(rotation, &ok);
- if (!ok)
- throw std::invalid_argument("Invalid rotation");
-
- if (hflip)
- t ^= Transform::HFlip;
- if (vflip)
- t ^= Transform::VFlip;
- if (transpose)
- t ^= Transform::Transpose;
- return t;
- }), py::arg("rotation") = 0, py::arg("hflip") = false,
- py::arg("vflip") = false, py::arg("transpose") = false)
- .def(py::init([](Transform &other) { return other; }))
- .def("__str__", [](Transform &self) {
- return "<libcamera.Transform '" + std::string(transformToString(self)) + "'>";
- })
- .def_property("hflip",
- [](Transform &self) {
- return !!(self & Transform::HFlip);
- },
- [](Transform &self, bool hflip) {
- if (hflip)
- self |= Transform::HFlip;
- else
- self &= ~Transform::HFlip;
- })
- .def_property("vflip",
- [](Transform &self) {
- return !!(self & Transform::VFlip);
- },
- [](Transform &self, bool vflip) {
- if (vflip)
- self |= Transform::VFlip;
- else
- self &= ~Transform::VFlip;
- })
- .def_property("transpose",
- [](Transform &self) {
- return !!(self & Transform::Transpose);
- },
- [](Transform &self, bool transpose) {
- if (transpose)
- self |= Transform::Transpose;
- else
- self &= ~Transform::Transpose;
- })
- .def("inverse", [](Transform &self) { return -self; })
- .def("invert", [](Transform &self) {
- self = -self;
- })
- .def("compose", [](Transform &self, Transform &other) {
- self = self * other;
- });
-
- pyColorSpace
- .def(py::init([](ColorSpace::Primaries primaries,
- ColorSpace::TransferFunction transferFunction,
- ColorSpace::YcbcrEncoding ycbcrEncoding,
- ColorSpace::Range range) {
- return ColorSpace(primaries, transferFunction, ycbcrEncoding, range);
- }), py::arg("primaries"), py::arg("transferFunction"),
- py::arg("ycbcrEncoding"), py::arg("range"))
- .def(py::init([](ColorSpace &other) { return other; }))
- .def("__str__", [](ColorSpace &self) {
- return "<libcamera.ColorSpace '" + self.toString() + "'>";
- })
- .def_readwrite("primaries", &ColorSpace::primaries)
- .def_readwrite("transferFunction", &ColorSpace::transferFunction)
- .def_readwrite("ycbcrEncoding", &ColorSpace::ycbcrEncoding)
- .def_readwrite("range", &ColorSpace::range)
- .def_static("Raw", []() { return ColorSpace::Raw; })
- .def_static("Jpeg", []() { return ColorSpace::Jpeg; })
- .def_static("Srgb", []() { return ColorSpace::Srgb; })
- .def_static("Smpte170m", []() { return ColorSpace::Smpte170m; })
- .def_static("Rec709", []() { return ColorSpace::Rec709; })
- .def_static("Rec2020", []() { return ColorSpace::Rec2020; });
-
- pyColorSpacePrimaries
- .value("Raw", ColorSpace::Primaries::Raw)
- .value("Smpte170m", ColorSpace::Primaries::Smpte170m)
- .value("Rec709", ColorSpace::Primaries::Rec709)
- .value("Rec2020", ColorSpace::Primaries::Rec2020);
-
- pyColorSpaceTransferFunction
- .value("Linear", ColorSpace::TransferFunction::Linear)
- .value("Srgb", ColorSpace::TransferFunction::Srgb)
- .value("Rec709", ColorSpace::TransferFunction::Rec709);
-
- pyColorSpaceYcbcrEncoding
- .value("Null", ColorSpace::YcbcrEncoding::None)
- .value("Rec601", ColorSpace::YcbcrEncoding::Rec601)
- .value("Rec709", ColorSpace::YcbcrEncoding::Rec709)
- .value("Rec2020", ColorSpace::YcbcrEncoding::Rec2020);
-
- pyColorSpaceRange
- .value("Full", ColorSpace::Range::Full)
- .value("Limited", ColorSpace::Range::Limited);
-
pyPixelFormat
.def(py::init<>())
.def(py::init<uint32_t, uint64_t>())
diff --git a/src/py/libcamera/py_main.h b/src/py/libcamera/py_main.h
new file mode 100644
index 00000000..5bb5f2d1
--- /dev/null
+++ b/src/py/libcamera/py_main.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+ */
+
+#pragma once
+
+#include <libcamera/base/log.h>
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(Python)
+
+}
diff --git a/src/py/libcamera/py_properties_generated.cpp.in b/src/py/libcamera/py_properties_generated.cpp.in
index 044b2b2a..e3802b81 100644
--- a/src/py/libcamera/py_properties_generated.cpp.in
+++ b/src/py/libcamera/py_properties_generated.cpp.in
@@ -9,7 +9,7 @@
#include <libcamera/property_ids.h>
-#include <pybind11/smart_holder.h>
+#include <pybind11/pybind11.h>
namespace py = pybind11;
@@ -17,14 +17,12 @@ class PyProperties
{
};
-class PyDraftProperties
-{
-};
+${vendors_class_def}
void init_py_properties_generated(py::module& m)
{
auto controls = py::class_<PyProperties>(m, "properties");
- auto draft = py::class_<PyDraftProperties>(controls, "draft");
+${vendors_defs}
${controls}
}
diff --git a/src/py/libcamera/py_transform.cpp b/src/py/libcamera/py_transform.cpp
new file mode 100644
index 00000000..f3a0bfaf
--- /dev/null
+++ b/src/py/libcamera/py_transform.cpp
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+ *
+ * Python bindings - Transform class
+ */
+
+#include <libcamera/transform.h>
+#include <libcamera/libcamera.h>
+
+#include <pybind11/operators.h>
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+
+namespace py = pybind11;
+
+using namespace libcamera;
+
+void init_py_transform(py::module &m)
+{
+ auto pyTransform = py::class_<Transform>(m, "Transform");
+
+ pyTransform
+ .def(py::init([](int rotation, bool hflip, bool vflip, bool transpose) {
+ bool ok;
+
+ Transform t = transformFromRotation(rotation, &ok);
+ if (!ok)
+ throw std::invalid_argument("Invalid rotation");
+
+ if (hflip)
+ t ^= Transform::HFlip;
+ if (vflip)
+ t ^= Transform::VFlip;
+ if (transpose)
+ t ^= Transform::Transpose;
+ return t;
+ }), py::arg("rotation") = 0, py::arg("hflip") = false,
+ py::arg("vflip") = false, py::arg("transpose") = false)
+ .def(py::init([](Transform &other) { return other; }))
+ .def("__str__", [](Transform &self) {
+ return "<libcamera.Transform '" + std::string(transformToString(self)) + "'>";
+ })
+ .def_property("hflip",
+ [](Transform &self) {
+ return !!(self & Transform::HFlip);
+ },
+ [](Transform &self, bool hflip) {
+ if (hflip)
+ self |= Transform::HFlip;
+ else
+ self &= ~Transform::HFlip;
+ })
+ .def_property("vflip",
+ [](Transform &self) {
+ return !!(self & Transform::VFlip);
+ },
+ [](Transform &self, bool vflip) {
+ if (vflip)
+ self |= Transform::VFlip;
+ else
+ self &= ~Transform::VFlip;
+ })
+ .def_property("transpose",
+ [](Transform &self) {
+ return !!(self & Transform::Transpose);
+ },
+ [](Transform &self, bool transpose) {
+ if (transpose)
+ self |= Transform::Transpose;
+ else
+ self &= ~Transform::Transpose;
+ })
+ .def("inverse", [](Transform &self) { return -self; })
+ .def("invert", [](Transform &self) {
+ self = -self;
+ })
+ .def("compose", [](Transform &self, Transform &other) {
+ self = self * other;
+ });
+}
diff --git a/src/py/meson.build b/src/py/meson.build
index 4ce9668c..a4586b4a 100644
--- a/src/py/meson.build
+++ b/src/py/meson.build
@@ -1 +1,3 @@
+# SPDX-License-Identifier: CC0-1.0
+
subdir('libcamera')
diff --git a/src/v4l2/meson.build b/src/v4l2/meson.build
index f132103c..58f53bf3 100644
--- a/src/v4l2/meson.build
+++ b/src/v4l2/meson.build
@@ -24,6 +24,7 @@ v4l2_compat_cpp_args = [
'-U_FILE_OFFSET_BITS',
'-D_FILE_OFFSET_BITS=32',
'-D_LARGEFILE64_SOURCE',
+ '-U_TIME_BITS',
'-fvisibility=hidden',
]
@@ -31,6 +32,7 @@ v4l2_compat = shared_library('v4l2-compat',
v4l2_compat_sources,
name_prefix : '',
install : true,
+ install_dir : libcamera_libexecdir,
dependencies : [libcamera_private, libdl],
cpp_args : v4l2_compat_cpp_args)
@@ -38,9 +40,10 @@ v4l2_compat = shared_library('v4l2-compat',
# adaptation layer.
cdata = configuration_data()
-cdata.set('LIBCAMERA_V4L2_SO', get_option('prefix') / get_option('libdir') / 'v4l2-compat.so')
+cdata.set('LIBCAMERA_V4L2_SO', get_option('prefix') / libcamera_libexecdir / 'v4l2-compat.so')
configure_file(input : 'libcamerify.in',
output : 'libcamerify',
configuration : cdata,
- install_dir : get_option('bindir'))
+ install_dir : get_option('bindir'),
+ install_tag : 'bin')
diff --git a/src/v4l2/v4l2_camera.cpp b/src/v4l2/v4l2_camera.cpp
index e922b9e6..0f3b862f 100644
--- a/src/v4l2/v4l2_camera.cpp
+++ b/src/v4l2/v4l2_camera.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_camera.cpp - V4L2 compatibility camera
+ * V4L2 compatibility camera
*/
#include "v4l2_camera.h"
@@ -71,11 +71,10 @@ std::vector<V4L2Camera::Buffer> V4L2Camera::completedBuffers()
{
std::vector<Buffer> v;
- bufferLock_.lock();
+ MutexLocker lock(bufferLock_);
for (std::unique_ptr<Buffer> &metadata : completedBuffers_)
v.push_back(*metadata.get());
completedBuffers_.clear();
- bufferLock_.unlock();
return v;
}
@@ -278,7 +277,7 @@ int V4L2Camera::qbuf(unsigned int index)
void V4L2Camera::waitForBufferAvailable()
{
MutexLocker locker(bufferMutex_);
- bufferCV_.wait(locker, [&] {
+ bufferCV_.wait(locker, [&]() LIBCAMERA_TSA_REQUIRES(bufferMutex_) {
return bufferAvailableCount_ >= 1 || !isRunning_;
});
if (isRunning_)
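
The completedBuffers() change replaces the manual lock()/unlock() pair with a scoped MutexLocker, and the wait predicate gains a LIBCAMERA_TSA_REQUIRES annotation so Clang's thread safety analysis knows the lambda runs with the mutex held. A minimal sketch of the same pattern; it assumes libcamera's Mutex, MutexLocker and ConditionVariable wrappers from <libcamera/base/mutex.h>, the annotation macros from <libcamera/base/thread_annotations.h>, and that ConditionVariable mirrors std::condition_variable's notify interface:

    #include <libcamera/base/mutex.h>
    #include <libcamera/base/thread_annotations.h>

    using namespace libcamera;

    class BufferQueue
    {
    public:
        void post()
        {
            MutexLocker locker(mutex_);
            available_++;
            cv_.notify_one();
        }

        void waitForBuffer()
        {
            MutexLocker locker(mutex_);
            /* The annotation tells the analyser the lambda runs under mutex_. */
            cv_.wait(locker, [&]() LIBCAMERA_TSA_REQUIRES(mutex_) {
                return available_ > 0;
            });
            available_--;
        }

    private:
        Mutex mutex_;
        ConditionVariable cv_;
        unsigned int available_ LIBCAMERA_TSA_GUARDED_BY(mutex_) = 0;
    };

The RAII locker also guarantees the mutex is released on every return path, which the explicit unlock() call could not.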
diff --git a/src/v4l2/v4l2_camera.h b/src/v4l2/v4l2_camera.h
index 03e74118..278cc33e 100644
--- a/src/v4l2/v4l2_camera.h
+++ b/src/v4l2/v4l2_camera.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_camera.h - V4L2 compatibility camera
+ * V4L2 compatibility camera
*/
#pragma once
@@ -39,7 +39,7 @@ public:
void bind(int efd);
void unbind();
- std::vector<Buffer> completedBuffers();
+ std::vector<Buffer> completedBuffers() LIBCAMERA_TSA_EXCLUDES(bufferLock_);
int configure(libcamera::StreamConfiguration *streamConfigOut,
const libcamera::Size &size,
@@ -58,13 +58,14 @@ public:
int qbuf(unsigned int index);
- void waitForBufferAvailable();
- bool isBufferAvailable();
+ void waitForBufferAvailable() LIBCAMERA_TSA_EXCLUDES(bufferMutex_);
+ bool isBufferAvailable() LIBCAMERA_TSA_EXCLUDES(bufferMutex_);
bool isRunning();
private:
- void requestComplete(libcamera::Request *request);
+ void requestComplete(libcamera::Request *request)
+ LIBCAMERA_TSA_EXCLUDES(bufferLock_);
std::shared_ptr<libcamera::Camera> camera_;
std::unique_ptr<libcamera::CameraConfiguration> config_;
@@ -77,11 +78,12 @@ private:
std::vector<std::unique_ptr<libcamera::Request>> requestPool_;
std::deque<libcamera::Request *> pendingRequests_;
- std::deque<std::unique_ptr<Buffer>> completedBuffers_;
+ std::deque<std::unique_ptr<Buffer>> completedBuffers_
+ LIBCAMERA_TSA_GUARDED_BY(bufferLock_);
int efd_;
libcamera::Mutex bufferMutex_;
libcamera::ConditionVariable bufferCV_;
- unsigned int bufferAvailableCount_;
+ unsigned int bufferAvailableCount_ LIBCAMERA_TSA_GUARDED_BY(bufferMutex_);
};
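
The header-side annotations document the locking contract of V4L2Camera: LIBCAMERA_TSA_GUARDED_BY marks members that must only be touched with the named mutex held, and LIBCAMERA_TSA_EXCLUDES marks methods that must not be entered with it already held. A small sketch of what that buys at compile time, assuming Clang with -Wthread-safety and the same libcamera headers as above:

    #include <libcamera/base/mutex.h>
    #include <libcamera/base/thread_annotations.h>

    using namespace libcamera;

    class Stats
    {
    public:
        unsigned int count() LIBCAMERA_TSA_EXCLUDES(mutex_)
        {
            MutexLocker locker(mutex_);
            return count_;
        }

        void record() LIBCAMERA_TSA_EXCLUDES(mutex_)
        {
            MutexLocker locker(mutex_);
            count_++;

            /*
             * count() excludes mutex_, so calling it here, while mutex_
             * is held, would be flagged by the analysis (and would
             * deadlock at run time with a non-recursive mutex).
             */
        }

    private:
        Mutex mutex_;
        /* Any access to count_ without mutex_ held is reported by Clang. */
        unsigned int count_ LIBCAMERA_TSA_GUARDED_BY(mutex_) = 0;
    };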
diff --git a/src/v4l2/v4l2_camera_file.cpp b/src/v4l2/v4l2_camera_file.cpp
index 0a41587c..d8fe854b 100644
--- a/src/v4l2/v4l2_camera_file.cpp
+++ b/src/v4l2/v4l2_camera_file.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * v4l2_camera_file.h - V4L2 compatibility camera file information
+ * V4L2 compatibility camera file information
*/
#include "v4l2_camera_file.h"
diff --git a/src/v4l2/v4l2_camera_file.h b/src/v4l2/v4l2_camera_file.h
index 1a7b6a63..1212989e 100644
--- a/src/v4l2/v4l2_camera_file.h
+++ b/src/v4l2/v4l2_camera_file.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * v4l2_camera_file.h - V4L2 compatibility camera file information
+ * V4L2 compatibility camera file information
*/
#pragma once
diff --git a/src/v4l2/v4l2_camera_proxy.cpp b/src/v4l2/v4l2_camera_proxy.cpp
index 26a227da..3f7c00a2 100644
--- a/src/v4l2/v4l2_camera_proxy.cpp
+++ b/src/v4l2/v4l2_camera_proxy.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_camera_proxy.cpp - Proxy to V4L2 compatibility camera
+ * Proxy to V4L2 compatibility camera
*/
#include "v4l2_camera_proxy.h"
@@ -182,7 +182,7 @@ void V4L2CameraProxy::setFmtFromConfig(const StreamConfiguration &streamConfig)
v4l2PixFormat_.width = size.width;
v4l2PixFormat_.height = size.height;
- v4l2PixFormat_.pixelformat = V4L2PixelFormat::fromPixelFormat(streamConfig.pixelFormat);
+ v4l2PixFormat_.pixelformat = V4L2PixelFormat::fromPixelFormat(streamConfig.pixelFormat)[0];
v4l2PixFormat_.field = V4L2_FIELD_NONE;
v4l2PixFormat_.bytesperline = streamConfig.stride;
v4l2PixFormat_.sizeimage = streamConfig.frameSize;
@@ -290,7 +290,7 @@ int V4L2CameraProxy::vidioc_enum_fmt(V4L2CameraFile *file, struct v4l2_fmtdesc *
return -EINVAL;
PixelFormat format = streamConfig_.formats().pixelformats()[arg->index];
- V4L2PixelFormat v4l2Format = V4L2PixelFormat::fromPixelFormat(format);
+ V4L2PixelFormat v4l2Format = V4L2PixelFormat::fromPixelFormat(format)[0];
arg->flags = format == formats::MJPEG ? V4L2_FMT_FLAG_COMPRESSED : 0;
utils::strlcpy(reinterpret_cast<char *>(arg->description),
@@ -333,7 +333,7 @@ int V4L2CameraProxy::tryFormat(struct v4l2_format *arg)
arg->fmt.pix.width = config.size.width;
arg->fmt.pix.height = config.size.height;
- arg->fmt.pix.pixelformat = V4L2PixelFormat::fromPixelFormat(config.pixelFormat);
+ arg->fmt.pix.pixelformat = V4L2PixelFormat::fromPixelFormat(config.pixelFormat)[0];
arg->fmt.pix.field = V4L2_FIELD_NONE;
arg->fmt.pix.bytesperline = config.stride;
arg->fmt.pix.sizeimage = config.frameSize;
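
The repeated [0] indexing reflects an API change: V4L2PixelFormat::fromPixelFormat() now returns the list of V4L2 formats that can carry a given PixelFormat (for instance single- and multi-planar variants), and the proxy takes the first entry as the default. A sketch of that pattern; the container operations (empty(), operator[]), the implicit uint32_t conversion and the internal header path are assumptions based on the usage above:

    #include <cstdint>

    #include <libcamera/pixel_format.h>

    #include "libcamera/internal/v4l2_pixelformat.h"

    using namespace libcamera;

    /* Returns the V4L2 fourcc the proxy would report for a PixelFormat. */
    uint32_t v4l2FourccFor(const PixelFormat &format)
    {
        const auto &fmts = V4L2PixelFormat::fromPixelFormat(format);
        if (fmts.empty())
            return 0;

        /*
         * The first entry is treated as the preferred mapping; the
         * conversion to uint32_t mirrors the assignment to
         * v4l2PixFormat_.pixelformat in the diff.
         */
        return fmts[0];
    }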
@@ -778,10 +778,20 @@ const std::set<unsigned long> V4L2CameraProxy::supportedIoctls_ = {
VIDIOC_STREAMOFF,
};
-int V4L2CameraProxy::ioctl(V4L2CameraFile *file, unsigned long request, void *arg)
+int V4L2CameraProxy::ioctl(V4L2CameraFile *file, unsigned long longRequest, void *arg)
{
MutexLocker locker(proxyMutex_);
+ /*
+ * The Linux kernel only processes the lower 32 bits of an ioctl request.
+ *
+ * Explicitly cast to an unsigned int here to prevent unexpected
+ * sign-extension: if an application stores the ioctl request in a signed
+ * int, converting it to an unsigned long on 64-bit architectures would
+ * sign-extend it to an incorrect value.
+ */
+ unsigned int request = longRequest;
+
if (!arg && (_IOC_DIR(request) & _IOC_WRITE)) {
errno = EFAULT;
return -1;
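
The cast matters on LP64 platforms: many VIDIOC_* codes have bit 31 set, so a request stored in a signed int sign-extends when widened to unsigned long and no longer compares equal to the 32-bit constant. A standalone illustration (the request value is a hypothetical code with bit 31 set, not a real ioctl):

    #include <cstdio>

    int main()
    {
        /* Hypothetical request code with bit 31 set, like many VIDIOC_* values. */
        const unsigned int code = 0xc0000001u;

        int asInt = static_cast<int>(code);   /* an application storing it in an int */
        unsigned long widened = asInt;        /* sign-extends on LP64 */
        unsigned long masked = static_cast<unsigned int>(asInt); /* what the cast restores */

        printf("widened=%#lx masked=%#lx equal=%d\n",
               widened, masked, widened == static_cast<unsigned long>(code));
        return 0;
    }

On a 64-bit build the widened value prints as 0xffffffffc0000001 and the comparison fails, while the masked value matches the original code, which is exactly what the unsigned int cast in ioctl() guards against.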
diff --git a/src/v4l2/v4l2_camera_proxy.h b/src/v4l2/v4l2_camera_proxy.h
index 76ca2d8a..3d8784df 100644
--- a/src/v4l2/v4l2_camera_proxy.h
+++ b/src/v4l2/v4l2_camera_proxy.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_camera_proxy.h - Proxy to V4L2 compatibility camera
+ * Proxy to V4L2 compatibility camera
*/
#pragma once
@@ -27,13 +27,15 @@ class V4L2CameraProxy
public:
V4L2CameraProxy(unsigned int index, std::shared_ptr<libcamera::Camera> camera);
- int open(V4L2CameraFile *file);
- void close(V4L2CameraFile *file);
+ int open(V4L2CameraFile *file) LIBCAMERA_TSA_EXCLUDES(proxyMutex_);
+ void close(V4L2CameraFile *file) LIBCAMERA_TSA_EXCLUDES(proxyMutex_);
void *mmap(V4L2CameraFile *file, void *addr, size_t length, int prot,
- int flags, off64_t offset);
- int munmap(V4L2CameraFile *file, void *addr, size_t length);
+ int flags, off64_t offset) LIBCAMERA_TSA_EXCLUDES(proxyMutex_);
+ int munmap(V4L2CameraFile *file, void *addr, size_t length)
+ LIBCAMERA_TSA_EXCLUDES(proxyMutex_);
- int ioctl(V4L2CameraFile *file, unsigned long request, void *arg);
+ int ioctl(V4L2CameraFile *file, unsigned long request, void *arg)
+ LIBCAMERA_TSA_EXCLUDES(proxyMutex_);
private:
bool validateBufferType(uint32_t type);
diff --git a/src/v4l2/v4l2_compat.cpp b/src/v4l2/v4l2_compat.cpp
index 1765fb5d..8e2b7e92 100644
--- a/src/v4l2/v4l2_compat.cpp
+++ b/src/v4l2/v4l2_compat.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_compat.cpp - V4L2 compatibility layer
+ * V4L2 compatibility layer
*/
#include "v4l2_compat_manager.h"
diff --git a/src/v4l2/v4l2_compat_manager.cpp b/src/v4l2/v4l2_compat_manager.cpp
index 0f7575c5..6a00afb5 100644
--- a/src/v4l2/v4l2_compat_manager.cpp
+++ b/src/v4l2/v4l2_compat_manager.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_compat_manager.cpp - V4L2 compatibility manager
+ * V4L2 compatibility manager
*/
#include "v4l2_compat_manager.h"
@@ -24,6 +24,7 @@
#include <libcamera/camera.h>
#include <libcamera/camera_manager.h>
+#include <libcamera/property_ids.h>
#include "v4l2_camera_file.h"
@@ -113,14 +114,35 @@ int V4L2CompatManager::getCameraIndex(int fd)
if (ret < 0)
return -1;
- std::shared_ptr<Camera> target = cm_->get(statbuf.st_rdev);
- if (!target)
- return -1;
+ const dev_t devnum = statbuf.st_rdev;
+ /*
+ * Iterate over each known camera and check whether it reports this
+ * node's device number in its list of SystemDevices.

+ */
auto cameras = cm_->cameras();
for (auto [index, camera] : utils::enumerate(cameras)) {
- if (camera == target)
- return index;
+ Span<const int64_t> devices = camera->properties()
+ .get(properties::SystemDevices)
+ .value_or(Span<int64_t>{});
+
+ /*
+ * While multiple cameras could reference the same device node, we
+ * take the first match as a best effort for now.
+ *
+ * \todo Each camera can be accessed through any of the video
+ * device nodes that it uses. This may confuse applications.
+ * Consider reworking the V4L2 adaptation layer to instead
+ * expose each Camera instance through a single video device
+ * node (with a consistent and stable mapping). The other
+ * device nodes could possibly be hidden from the application
+ * by intercepting additional calls to the C library.
+ */
+ for (const int64_t dev : devices) {
+ if (dev == static_cast<int64_t>(devnum))
+ return index;
+ }
}
return -1;
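
getCameraIndex() now matches the opened node's device number against each camera's SystemDevices property instead of relying on a dev_t lookup in the camera manager. A sketch of the same check from an application's point of view; the helper name is illustrative and the SystemDevices property assumes a libcamera version that exposes it:

    #include <memory>

    #include <sys/stat.h>

    #include <libcamera/base/span.h>

    #include <libcamera/camera.h>
    #include <libcamera/property_ids.h>

    using namespace libcamera;

    /* True if 'camera' lists the device number of 'path' in SystemDevices. */
    bool cameraOwnsNode(const std::shared_ptr<Camera> &camera, const char *path)
    {
        struct stat st;
        if (stat(path, &st) < 0)
            return false;

        Span<const int64_t> devices = camera->properties()
                                              .get(properties::SystemDevices)
                                              .value_or(Span<int64_t>{});

        for (int64_t dev : devices) {
            if (dev == static_cast<int64_t>(st.st_rdev))
                return true;
        }

        return false;
    }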
diff --git a/src/v4l2/v4l2_compat_manager.h b/src/v4l2/v4l2_compat_manager.h
index 64af9a8c..f7c6f122 100644
--- a/src/v4l2/v4l2_compat_manager.h
+++ b/src/v4l2/v4l2_compat_manager.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_compat_manager.h - V4L2 compatibility manager
+ * V4L2 compatibility manager
*/
#pragma once